HBASE-18439 Subclasses of o.a.h.h.chaos.actions.Action all use the same logger
Signed-off-by: Jan Hentschel <jan.hentschel@ultratendency.com>
Signed-off-by: Guangxu Cheng <gxcheng@apache.org>
parent d5dcc090c1
commit 08aae42156
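Every class touched below applies the same one-line pattern: declare a private, per-class SLF4J logger instead of relying on the shared protected logger inherited from Action. A minimal sketch of that pattern is shown here for orientation only; the ExampleChaosAction class is hypothetical and is not part of this commit.

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    // Hypothetical action class, used only to illustrate the per-class logger pattern.
    public class ExampleChaosAction {

      // A private logger named after the concrete class, so log output is attributed
      // to this action rather than to the shared o.a.h.h.chaos.actions.Action logger.
      private static final Logger LOG = LoggerFactory.getLogger(ExampleChaosAction.class);

      public void perform() {
        LOG.info("Performing example chaos action");
      }
    }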
@@ -73,7 +73,7 @@ public class Action {
  public static final String START_NAMENODE_TIMEOUT_KEY =
      "hbase.chaosmonkey.action.startnamenodetimeout";

-  protected static final Logger LOG = LoggerFactory.getLogger(Action.class);
+  private static final Logger LOG = LoggerFactory.getLogger(Action.class);

  protected static final long KILL_MASTER_TIMEOUT_DEFAULT = PolicyBasedChaosMonkey.TIMEOUT;
  protected static final long START_MASTER_TIMEOUT_DEFAULT = PolicyBasedChaosMonkey.TIMEOUT;
@@ -312,9 +312,11 @@ public class Action {
  }

  /**
-   * Apply a transform to all columns in a given table. If there are no columns in a table or if the context is stopping does nothing.
+   * Apply a transform to all columns in a given table. If there are no columns in a table
+   * or if the context is stopping does nothing.
   * @param tableName the table to modify
-   * @param transform the modification to perform. Callers will have the column name as a string and a column family builder available to them
+   * @param transform the modification to perform. Callers will have the
+   *   column name as a string and a column family builder available to them
   */
  protected void modifyAllTableColumns(TableName tableName, BiConsumer<String, ColumnFamilyDescriptorBuilder> transform) throws IOException {
    HBaseTestingUtility util = this.context.getHBaseIntegrationTestingUtility();
@@ -342,7 +344,8 @@ public class Action {
  }

  /**
-   * Apply a transform to all columns in a given table. If there are no columns in a table or if the context is stopping does nothing.
+   * Apply a transform to all columns in a given table.
+   * If there are no columns in a table or if the context is stopping does nothing.
   * @param tableName the table to modify
   * @param transform the modification to perform on each column family descriptor builder
   */
@@ -27,6 +27,8 @@ import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;

/**
 * Action the adds a column family to a table.
@@ -34,6 +36,7 @@ import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
public class AddColumnAction extends Action {
  private final TableName tableName;
  private Admin admin;
+  private static final Logger LOG = LoggerFactory.getLogger(AddColumnAction.class);

  public AddColumnAction(TableName tableName) {
    this.tableName = tableName;
@@ -21,14 +21,19 @@ package org.apache.hadoop.hbase.chaos.actions;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
+
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;

/**
 * Restarts a ratio of the running regionservers at the same time
 */
public class BatchRestartRsAction extends RestartActionBaseAction {
  float ratio; //ratio of regionservers to restart
+  private static final Logger LOG =
+      LoggerFactory.getLogger(BatchRestartRsAction.class);

  public BatchRestartRsAction(long sleepTime, float ratio) {
    super(sleepTime);
@@ -23,6 +23,8 @@ import java.util.Random;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.BloomFilterUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;

/**
 * Action that tries to adjust the bloom filter setting on all the columns of a
@@ -31,6 +33,7 @@ import org.apache.hadoop.hbase.util.BloomFilterUtil;
public class ChangeBloomFilterAction extends Action {
  private final long sleepTime;
  private final TableName tableName;
+  private static final Logger LOG = LoggerFactory.getLogger(ChangeBloomFilterAction.class);

  public ChangeBloomFilterAction(TableName tableName) {
    this(-1, tableName);
@@ -18,12 +18,14 @@

package org.apache.hadoop.hbase.chaos.actions;

+import java.io.IOException;
+import java.util.Random;
+
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
import org.apache.hadoop.io.compress.Compressor;
-
-import java.io.IOException;
-import java.util.Random;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;

/**
 * Action that changes the compression algorithm on a column family from a list of tables.
@@ -31,6 +33,7 @@ import java.util.Random;
public class ChangeCompressionAction extends Action {
  private final TableName tableName;
  private final Random random;
+  private static final Logger LOG = LoggerFactory.getLogger(ChangeCompressionAction.class);

  public ChangeCompressionAction(TableName tableName) {
    this.tableName = tableName;
@@ -18,18 +18,21 @@

package org.apache.hadoop.hbase.chaos.actions;

-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
-
import java.io.IOException;
import java.util.Random;

+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
/**
 * Action that changes the encoding on a column family from a list of tables.
 */
public class ChangeEncodingAction extends Action {
  private final TableName tableName;
  private final Random random;
+  private static final Logger LOG = LoggerFactory.getLogger(ChangeEncodingAction.class);

  public ChangeEncodingAction(TableName tableName) {
    this.tableName = tableName;
@@ -17,6 +17,8 @@
 */
package org.apache.hadoop.hbase.chaos.actions;

+import java.util.Random;
+
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
@@ -25,10 +27,11 @@ import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
import org.apache.hadoop.hbase.regionserver.DisabledRegionSplitPolicy;
import org.apache.hadoop.hbase.regionserver.IncreasingToUpperBoundRegionSplitPolicy;
-
-import java.util.Random;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;

public class ChangeSplitPolicyAction extends Action {
+  private static final Logger LOG = LoggerFactory.getLogger(ChangeSplitPolicyAction.class);
  private final TableName tableName;
  private final String[] possiblePolicies;
  private final Random random;
@@ -22,6 +22,8 @@ import java.io.IOException;
import java.util.Random;

import org.apache.hadoop.hbase.TableName;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;

/**
 * Action that changes the number of versions on a column family from a list of tables.
@@ -30,6 +32,7 @@ import org.apache.hadoop.hbase.TableName;
 */
public class ChangeVersionsAction extends Action {
  private final TableName tableName;
+  private static final Logger LOG = LoggerFactory.getLogger(ChangeVersionsAction.class);

  private Random random;

@@ -44,7 +47,7 @@ public class ChangeVersionsAction {

    LOG.debug("Performing action: Changing versions on " + tableName + " to " + versions);
    modifyAllTableColumns(tableName, columnBuilder -> {
-      columnBuilder.setMinVersions(versions).setMaxVersions(versions);
+      columnBuilder.setMinVersions(versions).setMaxVersions(versions);
    });
    LOG.debug("Performing action: Just changed versions on " + tableName);
  }
@@ -23,6 +23,8 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactType;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;

/**
 * Action that queues a table compaction.
@@ -31,6 +33,7 @@ public class CompactMobAction extends Action {
  private final TableName tableName;
  private final int majorRatio;
  private final long sleepTime;
+  private static final Logger LOG = LoggerFactory.getLogger(CompactMobAction.class);

  public CompactMobAction(TableName tableName, float majorRatio) {
    this(-1, tableName, majorRatio);
@@ -19,12 +19,15 @@
package org.apache.hadoop.hbase.chaos.actions;

import java.util.List;
+
import org.apache.commons.lang3.RandomUtils;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.RegionInfo;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;

/**
 * Region that queues a compaction of a random region from the table.
@@ -33,6 +36,8 @@ public class CompactRandomRegionOfTableAction extends Action {
  private final int majorRatio;
  private final long sleepTime;
  private final TableName tableName;
+  private static final Logger LOG =
+      LoggerFactory.getLogger(CompactRandomRegionOfTableAction.class);

  public CompactRandomRegionOfTableAction(
      TableName tableName, float majorRatio) {
@@ -22,6 +22,8 @@ import org.apache.commons.lang3.RandomUtils;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;

/**
 * Action that queues a table compaction.
@@ -30,6 +32,7 @@ public class CompactTableAction extends Action {
  private final TableName tableName;
  private final int majorRatio;
  private final long sleepTime;
+  private static final Logger LOG = LoggerFactory.getLogger(CompactTableAction.class);

  public CompactTableAction(TableName tableName, float majorRatio) {
    this(-1, tableName, majorRatio);
@@ -20,10 +20,15 @@ package org.apache.hadoop.hbase.chaos.actions;

import java.io.IOException;

+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
/**
 * Action to dump the cluster status.
 */
public class DumpClusterStatusAction extends Action {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(DumpClusterStatusAction.class);

  @Override
  public void init(ActionContext context) throws IOException {
@@ -24,16 +24,20 @@ import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.RegionInfo;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;

/**
 * Action that tries to flush a random region of a table.
 */
public class FlushRandomRegionOfTableAction extends Action {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(FlushRandomRegionOfTableAction.class);
  private final long sleepTime;
  private final TableName tableName;

  public FlushRandomRegionOfTableAction(TableName tableName) {
-    this (-1, tableName);
+    this (-1, tableName);
  }

  public FlushRandomRegionOfTableAction(int sleepTime, TableName tableName) {
@@ -21,11 +21,15 @@ package org.apache.hadoop.hbase.chaos.actions;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;

/**
 * Action that tries to flush a table.
 */
public class FlushTableAction extends Action {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(FlushTableAction.class);
  private final long sleepTime;
  private final TableName tableName;

@@ -18,10 +18,16 @@

package org.apache.hadoop.hbase.chaos.actions;

+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
/**
 * Action that tries to force a balancer run.
 */
public class ForceBalancerAction extends Action {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(ForceBalancerAction.class);
+
  @Override
  public void perform() throws Exception {
    // Don't try the flush if we're stopping
@@ -24,11 +24,15 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.RegionInfo;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;

/**
 * Action to merge regions of a table.
 */
public class MergeRandomAdjacentRegionsOfTableAction extends Action {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(MergeRandomAdjacentRegionsOfTableAction.class);
  private final TableName tableName;
  private final long sleepTime;

@@ -25,11 +25,15 @@ import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.RegionInfo;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;

/**
 * Action that tries to move a random region of a table.
 */
public class MoveRandomRegionOfTableAction extends Action {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(MoveRandomRegionOfTableAction.class);
  private final long sleepTime;
  private final TableName tableName;

@@ -22,17 +22,22 @@ import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
+
import org.apache.commons.lang3.RandomUtils;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.chaos.factories.MonkeyConstants;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.RegionInfo;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;

/**
 * Action that tries to move every region of a table.
 */
public class MoveRegionsOfTableAction extends Action {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(MoveRegionsOfTableAction.class);
  private final long sleepTime;
  private final TableName tableName;
  private final long maxTime;
@@ -28,11 +28,15 @@ import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;

/**
 * Action that removes a column family.
 */
public class RemoveColumnAction extends Action {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(RemoveColumnAction.class);
  private final TableName tableName;
  private final Set<String> protectedColumns;
  private Admin admin;
@@ -22,11 +22,15 @@ import java.io.IOException;

import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.util.Threads;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;

/**
 * Base class for restarting HBaseServer's
 */
public class RestartActionBaseAction extends Action {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(RestartActionBaseAction.class);
  long sleepTime; // how long should we sleep

  public RestartActionBaseAction(long sleepTime) {
@@ -19,11 +19,15 @@
package org.apache.hadoop.hbase.chaos.actions;

import org.apache.hadoop.hbase.ServerName;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;

/**
 * Action that tries to restart the active master.
 */
public class RestartActiveMasterAction extends RestartActionBaseAction {
+  private static final Logger LOG = LoggerFactory.getLogger(RestartActionBaseAction.class);
+
  public RestartActiveMasterAction(long sleepTime) {
    super(sleepTime);
  }
@@ -30,11 +30,15 @@ import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.server.namenode.ha.proto.HAZKInfoProtos.ActiveNodeInfo;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;

/**
 * Action that tries to restart the active namenode.
 */
public class RestartActiveNameNodeAction extends RestartActionBaseAction {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(RestartActiveNameNodeAction.class);

  // Value taken from org.apache.hadoop.ha.ActiveStandbyElector.java, variable :- LOCK_FILENAME
  private static final String ACTIVE_NN_LOCK_NAME = "ActiveStandbyElectorLock";
@@ -18,6 +18,10 @@

package org.apache.hadoop.hbase.chaos.actions;

+import java.io.IOException;
+import java.util.LinkedList;
+import java.util.List;
+
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey;
import org.apache.hadoop.hbase.util.FSUtils;
@@ -25,15 +29,15 @@ import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-
-import java.io.IOException;
-import java.util.LinkedList;
-import java.util.List;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;

/**
 * Action that restarts a random datanode.
 */
public class RestartRandomDataNodeAction extends RestartActionBaseAction {
+  private static final Logger LOG = LoggerFactory.getLogger(RestartRandomDataNodeAction.class);
+
  public RestartRandomDataNodeAction(long sleepTime) {
    super(sleepTime);
  }
@@ -20,11 +20,15 @@ package org.apache.hadoop.hbase.chaos.actions;

import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;

/**
 * Action that restarts a random HRegionServer
 */
public class RestartRandomRsAction extends RestartActionBaseAction {
+  private static final Logger LOG = LoggerFactory.getLogger(RestartRandomRsAction.class);
+
  public RestartRandomRsAction(long sleepTime) {
    super(sleepTime);
  }
@@ -21,11 +21,15 @@ package org.apache.hadoop.hbase.chaos.actions;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey;
import org.apache.hadoop.hbase.zookeeper.ZKServerTool;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;

/**
 * Action that restarts a random zookeeper node.
 */
public class RestartRandomZKNodeAction extends RestartActionBaseAction {
+  private static final Logger LOG = LoggerFactory.getLogger(RestartRandomZKNodeAction.class);
+
  public RestartRandomZKNodeAction(long sleepTime) {
    super(sleepTime);
  }
@@ -20,11 +20,16 @@ package org.apache.hadoop.hbase.chaos.actions;

import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.ServerName;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;

/**
 * Action that tries to restart the HRegionServer holding Meta.
 */
public class RestartRsHoldingMetaAction extends RestartActionBaseAction {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(RestartRsHoldingMetaAction.class);
+
  public RestartRsHoldingMetaAction(long sleepTime) {
    super(sleepTime);
  }
@@ -23,11 +23,15 @@ import java.util.List;
import org.apache.commons.lang3.RandomUtils;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.client.RegionLocator;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;

/**
 * Action that restarts an HRegionServer holding one of the regions of the table.
 */
public class RestartRsHoldingTableAction extends RestartActionBaseAction {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(RestartRsHoldingTableAction.class);

  private final RegionLocator locator;

@@ -32,8 +32,9 @@ import org.slf4j.LoggerFactory;

/**
 * Restarts a ratio of the regionservers in a rolling fashion. At each step, either kills a
- * server, or starts one, sleeping randomly (0-sleepTime) in between steps. The parameter maxDeadServers
- * limits the maximum number of servers that can be down at the same time during rolling restarts.
+ * server, or starts one, sleeping randomly (0-sleepTime) in between steps.
+ * The parameter maxDeadServers limits the maximum number of servers that
+ * can be down at the same time during rolling restarts.
 */
public class RollingBatchRestartRsAction extends BatchRestartRsAction {
  private static final Logger LOG = LoggerFactory.getLogger(RollingBatchRestartRsAction.class);
@@ -81,27 +82,27 @@ public class RollingBatchRestartRsAction extends BatchRestartRsAction {
      ServerName server;

      switch (action) {
-      case KILL:
-        server = serversToBeKilled.remove();
-        try {
-          killRs(server);
-        } catch (org.apache.hadoop.util.Shell.ExitCodeException e) {
-          // We've seen this in test runs where we timeout but the kill went through. HBASE-9743
-          // So, add to deadServers even if exception so the start gets called.
-          LOG.info("Problem killing but presume successful; code=" + e.getExitCode(), e);
-        }
-        deadServers.add(server);
-        break;
-      case START:
-        try {
-          server = deadServers.remove();
-          startRs(server);
-        } catch (org.apache.hadoop.util.Shell.ExitCodeException e) {
-          // The start may fail but better to just keep going though we may lose server.
-          //
-          LOG.info("Problem starting, will retry; code=" + e.getExitCode(), e);
-        }
-        break;
+      case KILL:
+        server = serversToBeKilled.remove();
+        try {
+          killRs(server);
+        } catch (org.apache.hadoop.util.Shell.ExitCodeException e) {
+          // We've seen this in test runs where we timeout but the kill went through. HBASE-9743
+          // So, add to deadServers even if exception so the start gets called.
+          LOG.info("Problem killing but presume successful; code=" + e.getExitCode(), e);
+        }
+        deadServers.add(server);
+        break;
+      case START:
+        try {
+          server = deadServers.remove();
+          startRs(server);
+        } catch (org.apache.hadoop.util.Shell.ExitCodeException e) {
+          // The start may fail but better to just keep going though we may lose server.
+          //
+          LOG.info("Problem starting, will retry; code=" + e.getExitCode(), e);
+        }
+        break;
      }

      sleep(RandomUtils.nextInt(0, (int)sleepTime));
@@ -21,11 +21,15 @@ package org.apache.hadoop.hbase.chaos.actions;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;

/**
 * Action that tries to take a snapshot of a table.
 */
public class SnapshotTableAction extends Action {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(SnapshotTableAction.class);
  private final TableName tableName;
  private final long sleepTime;

@@ -17,15 +17,19 @@
 */
package org.apache.hadoop.hbase.chaos.actions;

-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.Admin;

import java.io.IOException;
import java.util.concurrent.ThreadLocalRandom;

+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;


public class SplitAllRegionOfTableAction extends Action {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(SplitAllRegionOfTableAction.class);
  private static final int DEFAULT_MAX_SPLITS = 3;
  private static final String MAX_SPLIT_KEY = "hbase.chaosmonkey.action.maxFullTableSplits";
@@ -19,16 +19,21 @@
package org.apache.hadoop.hbase.chaos.actions;

import java.util.List;
+
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.RegionInfo;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;

/**
 * Action that tries to split a random region of a table.
 */
public class SplitRandomRegionOfTableAction extends Action {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(SplitRandomRegionOfTableAction.class);
  private final long sleepTime;
  private final TableName tableName;

@@ -21,13 +21,17 @@ package org.apache.hadoop.hbase.chaos.actions;
import java.util.Random;

import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;

/**
 * Action that tries to truncate of a table.
 */
public class TruncateTableAction extends Action {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TruncateTableAction.class);
  private final TableName tableName;
  private final Random random;

@@ -23,13 +23,18 @@ import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Set;
+
import org.apache.commons.lang3.RandomUtils;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.ServerName;
import org.junit.Assert;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;

/** This action is too specific to put in ChaosMonkey; put it here */
public class UnbalanceKillAndRebalanceAction extends Action {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(UnbalanceKillAndRebalanceAction.class);
  /** Fractions of servers to get regions and live and die respectively; from all other
   * servers, HOARD_FRC_OF_REGIONS will be removed to the above randomly */
  private static final double FRC_SERVERS_THAT_HOARD_AND_LIVE = 0.1;
@@ -21,14 +21,19 @@ package org.apache.hadoop.hbase.chaos.actions;
import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;
+
import org.apache.commons.lang3.RandomUtils;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.ServerName;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;

/**
 * Action that tries to unbalance the regions of a cluster.
 */
public class UnbalanceRegionsAction extends Action {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(UnbalanceRegionsAction.class);
  private double fractionOfRegions;
  private double fractionOfServers;
