diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/Action.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/Action.java index b0e7078397e..98ad2dc85ea 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/Action.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/Action.java @@ -73,7 +73,7 @@ public class Action { public static final String START_NAMENODE_TIMEOUT_KEY = "hbase.chaosmonkey.action.startnamenodetimeout"; - protected static final Logger LOG = LoggerFactory.getLogger(Action.class); + private static final Logger LOG = LoggerFactory.getLogger(Action.class); protected static final long KILL_MASTER_TIMEOUT_DEFAULT = PolicyBasedChaosMonkey.TIMEOUT; protected static final long START_MASTER_TIMEOUT_DEFAULT = PolicyBasedChaosMonkey.TIMEOUT; @@ -312,9 +312,11 @@ public class Action { } /** - * Apply a transform to all columns in a given table. If there are no columns in a table or if the context is stopping does nothing. + * Apply a transform to all columns in a given table. If there are no columns in a table + * or if the context is stopping does nothing. * @param tableName the table to modify - * @param transform the modification to perform. Callers will have the column name as a string and a column family builder available to them + * @param transform the modification to perform. Callers will have the + * column name as a string and a column family builder available to them */ protected void modifyAllTableColumns(TableName tableName, BiConsumer transform) throws IOException { HBaseTestingUtility util = this.context.getHBaseIntegrationTestingUtility(); @@ -342,7 +344,8 @@ public class Action { } /** - * Apply a transform to all columns in a given table. If there are no columns in a table or if the context is stopping does nothing. + * Apply a transform to all columns in a given table. + * If there are no columns in a table or if the context is stopping does nothing. 
* @param tableName the table to modify * @param transform the modification to perform on each column family descriptor builder */ diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/AddColumnAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/AddColumnAction.java index 3473684a3db..a01222aee94 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/AddColumnAction.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/AddColumnAction.java @@ -27,6 +27,8 @@ import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Action the adds a column family to a table. @@ -34,6 +36,7 @@ import org.apache.hadoop.hbase.client.TableDescriptorBuilder; public class AddColumnAction extends Action { private final TableName tableName; private Admin admin; + private static final Logger LOG = LoggerFactory.getLogger(AddColumnAction.class); public AddColumnAction(TableName tableName) { this.tableName = tableName; diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/BatchRestartRsAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/BatchRestartRsAction.java index 31ec874ba5b..2026a913a52 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/BatchRestartRsAction.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/BatchRestartRsAction.java @@ -21,14 +21,19 @@ package org.apache.hadoop.hbase.chaos.actions; import java.util.HashSet; import java.util.List; import java.util.Set; + import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Restarts a ratio of the running 
regionservers at the same time */ public class BatchRestartRsAction extends RestartActionBaseAction { float ratio; //ratio of regionservers to restart + private static final Logger LOG = + LoggerFactory.getLogger(BatchRestartRsAction.class); public BatchRestartRsAction(long sleepTime, float ratio) { super(sleepTime); diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ChangeBloomFilterAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ChangeBloomFilterAction.java index 55faccf2226..e76675aa4b1 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ChangeBloomFilterAction.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ChangeBloomFilterAction.java @@ -23,6 +23,8 @@ import java.util.Random; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.regionserver.BloomType; import org.apache.hadoop.hbase.util.BloomFilterUtil; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Action that tries to adjust the bloom filter setting on all the columns of a @@ -31,6 +33,7 @@ import org.apache.hadoop.hbase.util.BloomFilterUtil; public class ChangeBloomFilterAction extends Action { private final long sleepTime; private final TableName tableName; + private static final Logger LOG = LoggerFactory.getLogger(ChangeBloomFilterAction.class); public ChangeBloomFilterAction(TableName tableName) { this(-1, tableName); diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ChangeCompressionAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ChangeCompressionAction.java index 96f31b2ed27..533028428f6 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ChangeCompressionAction.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ChangeCompressionAction.java @@ -18,12 +18,14 @@ package org.apache.hadoop.hbase.chaos.actions; +import java.io.IOException; +import java.util.Random; + import 
org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.io.compress.Compression.Algorithm; import org.apache.hadoop.io.compress.Compressor; - -import java.io.IOException; -import java.util.Random; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Action that changes the compression algorithm on a column family from a list of tables. @@ -31,6 +33,7 @@ import java.util.Random; public class ChangeCompressionAction extends Action { private final TableName tableName; private final Random random; + private static final Logger LOG = LoggerFactory.getLogger(ChangeCompressionAction.class); public ChangeCompressionAction(TableName tableName) { this.tableName = tableName; diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ChangeEncodingAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ChangeEncodingAction.java index bce76940754..55a308ce386 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ChangeEncodingAction.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ChangeEncodingAction.java @@ -18,18 +18,21 @@ package org.apache.hadoop.hbase.chaos.actions; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; - import java.io.IOException; import java.util.Random; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + /** * Action that changes the encoding on a column family from a list of tables. 
*/ public class ChangeEncodingAction extends Action { private final TableName tableName; private final Random random; + private static final Logger LOG = LoggerFactory.getLogger(ChangeEncodingAction.class); public ChangeEncodingAction(TableName tableName) { this.tableName = tableName; diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ChangeSplitPolicyAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ChangeSplitPolicyAction.java index b4cf6a36f6d..72ff50fc092 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ChangeSplitPolicyAction.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ChangeSplitPolicyAction.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hbase.chaos.actions; +import java.util.Random; + import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; @@ -25,10 +27,11 @@ import org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy; import org.apache.hadoop.hbase.regionserver.DisabledRegionSplitPolicy; import org.apache.hadoop.hbase.regionserver.IncreasingToUpperBoundRegionSplitPolicy; - -import java.util.Random; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class ChangeSplitPolicyAction extends Action { + private static final Logger LOG = LoggerFactory.getLogger(ChangeSplitPolicyAction.class); private final TableName tableName; private final String[] possiblePolicies; private final Random random; diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ChangeVersionsAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ChangeVersionsAction.java index 69bd84be72e..18dba94a303 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ChangeVersionsAction.java +++ 
b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ChangeVersionsAction.java @@ -22,6 +22,8 @@ import java.io.IOException; import java.util.Random; import org.apache.hadoop.hbase.TableName; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Action that changes the number of versions on a column family from a list of tables. @@ -30,6 +32,7 @@ import org.apache.hadoop.hbase.TableName; */ public class ChangeVersionsAction extends Action { private final TableName tableName; + private static final Logger LOG = LoggerFactory.getLogger(ChangeVersionsAction.class); private Random random; @@ -44,7 +47,7 @@ public class ChangeVersionsAction extends Action { LOG.debug("Performing action: Changing versions on " + tableName + " to " + versions); modifyAllTableColumns(tableName, columnBuilder -> { - columnBuilder.setMinVersions(versions).setMaxVersions(versions); + columnBuilder.setMinVersions(versions).setMaxVersions(versions); }); LOG.debug("Performing action: Just changed versions on " + tableName); } diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/CompactMobAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/CompactMobAction.java index 7e812a8f553..6ac71d5739a 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/CompactMobAction.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/CompactMobAction.java @@ -23,6 +23,8 @@ import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.CompactType; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Action that queues a table compaction. 
@@ -31,6 +33,7 @@ public class CompactMobAction extends Action { private final TableName tableName; private final int majorRatio; private final long sleepTime; + private static final Logger LOG = LoggerFactory.getLogger(CompactMobAction.class); public CompactMobAction(TableName tableName, float majorRatio) { this(-1, tableName, majorRatio); diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/CompactRandomRegionOfTableAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/CompactRandomRegionOfTableAction.java index 548966823df..9a46147014c 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/CompactRandomRegionOfTableAction.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/CompactRandomRegionOfTableAction.java @@ -19,12 +19,15 @@ package org.apache.hadoop.hbase.chaos.actions; import java.util.List; + import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.RegionInfo; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Region that queues a compaction of a random region from the table. 
@@ -33,6 +36,8 @@ public class CompactRandomRegionOfTableAction extends Action { private final int majorRatio; private final long sleepTime; private final TableName tableName; + private static final Logger LOG = + LoggerFactory.getLogger(CompactRandomRegionOfTableAction.class); public CompactRandomRegionOfTableAction( TableName tableName, float majorRatio) { diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/CompactTableAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/CompactTableAction.java index 521591c4673..c05d43b89f5 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/CompactTableAction.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/CompactTableAction.java @@ -22,6 +22,8 @@ import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Action that queues a table compaction. 
@@ -30,6 +32,7 @@ public class CompactTableAction extends Action { private final TableName tableName; private final int majorRatio; private final long sleepTime; + private static final Logger LOG = LoggerFactory.getLogger(CompactTableAction.class); public CompactTableAction(TableName tableName, float majorRatio) { this(-1, tableName, majorRatio); diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/DumpClusterStatusAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/DumpClusterStatusAction.java index 36b8530c8f9..3c70af0349c 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/DumpClusterStatusAction.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/DumpClusterStatusAction.java @@ -20,10 +20,15 @@ package org.apache.hadoop.hbase.chaos.actions; import java.io.IOException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + /** * Action to dump the cluster status. */ public class DumpClusterStatusAction extends Action { + private static final Logger LOG = + LoggerFactory.getLogger(DumpClusterStatusAction.class); @Override public void init(ActionContext context) throws IOException { diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/FlushRandomRegionOfTableAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/FlushRandomRegionOfTableAction.java index be15a122ab6..083423c4ca9 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/FlushRandomRegionOfTableAction.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/FlushRandomRegionOfTableAction.java @@ -24,16 +24,20 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.RegionInfo; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Action that tries to flush a random region of a table. 
*/ public class FlushRandomRegionOfTableAction extends Action { + private static final Logger LOG = + LoggerFactory.getLogger(FlushRandomRegionOfTableAction.class); private final long sleepTime; private final TableName tableName; public FlushRandomRegionOfTableAction(TableName tableName) { - this (-1, tableName); + this (-1, tableName); } public FlushRandomRegionOfTableAction(int sleepTime, TableName tableName) { diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/FlushTableAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/FlushTableAction.java index 50fffead951..93f7347ac93 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/FlushTableAction.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/FlushTableAction.java @@ -21,11 +21,15 @@ package org.apache.hadoop.hbase.chaos.actions; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Action that tries to flush a table. */ public class FlushTableAction extends Action { + private static final Logger LOG = + LoggerFactory.getLogger(FlushTableAction.class); private final long sleepTime; private final TableName tableName; diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ForceBalancerAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ForceBalancerAction.java index 04f389a9519..d75475432a1 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ForceBalancerAction.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ForceBalancerAction.java @@ -18,10 +18,16 @@ package org.apache.hadoop.hbase.chaos.actions; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + /** * Action that tries to force a balancer run. 
*/ public class ForceBalancerAction extends Action { + private static final Logger LOG = + LoggerFactory.getLogger(ForceBalancerAction.class); + @Override public void perform() throws Exception { // Don't try the flush if we're stopping diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/MergeRandomAdjacentRegionsOfTableAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/MergeRandomAdjacentRegionsOfTableAction.java index 9a5b057c13b..085413d6e6f 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/MergeRandomAdjacentRegionsOfTableAction.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/MergeRandomAdjacentRegionsOfTableAction.java @@ -24,11 +24,15 @@ import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.RegionInfo; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Action to merge regions of a table. 
*/ public class MergeRandomAdjacentRegionsOfTableAction extends Action { + private static final Logger LOG = + LoggerFactory.getLogger(MergeRandomAdjacentRegionsOfTableAction.class); private final TableName tableName; private final long sleepTime; diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/MoveRandomRegionOfTableAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/MoveRandomRegionOfTableAction.java index 09bfe216acb..cdd2f20aed8 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/MoveRandomRegionOfTableAction.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/MoveRandomRegionOfTableAction.java @@ -25,11 +25,15 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.RegionInfo; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Action that tries to move a random region of a table. 
*/ public class MoveRandomRegionOfTableAction extends Action { + private static final Logger LOG = + LoggerFactory.getLogger(MoveRandomRegionOfTableAction.class); private final long sleepTime; private final TableName tableName; diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/MoveRegionsOfTableAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/MoveRegionsOfTableAction.java index 7bf968bc2ab..1c1f07b3df5 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/MoveRegionsOfTableAction.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/MoveRegionsOfTableAction.java @@ -22,17 +22,22 @@ import java.io.IOException; import java.util.Collection; import java.util.Collections; import java.util.List; + import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.chaos.factories.MonkeyConstants; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.RegionInfo; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Action that tries to move every region of a table. 
*/ public class MoveRegionsOfTableAction extends Action { + private static final Logger LOG = + LoggerFactory.getLogger(MoveRegionsOfTableAction.class); private final long sleepTime; private final TableName tableName; private final long maxTime; diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RemoveColumnAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RemoveColumnAction.java index 1098ff9a21f..37e540104d6 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RemoveColumnAction.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RemoveColumnAction.java @@ -28,11 +28,15 @@ import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.hadoop.hbase.util.Bytes; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Action that removes a column family. */ public class RemoveColumnAction extends Action { + private static final Logger LOG = + LoggerFactory.getLogger(RemoveColumnAction.class); private final TableName tableName; private final Set protectedColumns; private Admin admin; diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartActionBaseAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartActionBaseAction.java index d964d6272d5..c53de908226 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartActionBaseAction.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartActionBaseAction.java @@ -22,11 +22,15 @@ import java.io.IOException; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.util.Threads; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Base class for restarting HBaseServer's */ public class RestartActionBaseAction extends Action { + private static final Logger LOG = + 
LoggerFactory.getLogger(RestartActionBaseAction.class); long sleepTime; // how long should we sleep public RestartActionBaseAction(long sleepTime) { diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartActiveMasterAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartActiveMasterAction.java index 89415b95fe5..ddd719a6e67 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartActiveMasterAction.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartActiveMasterAction.java @@ -19,11 +19,15 @@ package org.apache.hadoop.hbase.chaos.actions; import org.apache.hadoop.hbase.ServerName; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Action that tries to restart the active master. */ public class RestartActiveMasterAction extends RestartActionBaseAction { + private static final Logger LOG = LoggerFactory.getLogger(RestartActiveMasterAction.class); + public RestartActiveMasterAction(long sleepTime) { super(sleepTime); } diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartActiveNameNodeAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartActiveNameNodeAction.java index 645743a7faf..2fc64e68a2c 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartActiveNameNodeAction.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartActiveNameNodeAction.java @@ -30,11 +30,15 @@ import org.apache.hadoop.hbase.zookeeper.ZNodePaths; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.HAUtil; import org.apache.hadoop.hdfs.server.namenode.ha.proto.HAZKInfoProtos.ActiveNodeInfo; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Action that tries to restart the active namenode. 
*/ public class RestartActiveNameNodeAction extends RestartActionBaseAction { + private static final Logger LOG = + LoggerFactory.getLogger(RestartActiveNameNodeAction.class); // Value taken from org.apache.hadoop.ha.ActiveStandbyElector.java, variable :- LOCK_FILENAME private static final String ACTIVE_NN_LOCK_NAME = "ActiveStandbyElectorLock"; diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartRandomDataNodeAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartRandomDataNodeAction.java index f5349dc1b6c..5824a42fca8 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartRandomDataNodeAction.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartRandomDataNodeAction.java @@ -18,6 +18,10 @@ package org.apache.hadoop.hbase.chaos.actions; +import java.io.IOException; +import java.util.LinkedList; +import java.util.List; + import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey; import org.apache.hadoop.hbase.util.FSUtils; @@ -25,15 +29,15 @@ import org.apache.hadoop.hdfs.DFSClient; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.HdfsConstants; - -import java.io.IOException; -import java.util.LinkedList; -import java.util.List; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Action that restarts a random datanode. 
*/ public class RestartRandomDataNodeAction extends RestartActionBaseAction { + private static final Logger LOG = LoggerFactory.getLogger(RestartRandomDataNodeAction.class); + public RestartRandomDataNodeAction(long sleepTime) { super(sleepTime); } diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartRandomRsAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartRandomRsAction.java index 7b09dd31051..48458b68dcf 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartRandomRsAction.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartRandomRsAction.java @@ -20,11 +20,15 @@ package org.apache.hadoop.hbase.chaos.actions; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Action that restarts a random HRegionServer */ public class RestartRandomRsAction extends RestartActionBaseAction { + private static final Logger LOG = LoggerFactory.getLogger(RestartRandomRsAction.class); + public RestartRandomRsAction(long sleepTime) { super(sleepTime); } diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartRandomZKNodeAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartRandomZKNodeAction.java index ae574a0b7af..3ed7a0d9237 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartRandomZKNodeAction.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartRandomZKNodeAction.java @@ -21,11 +21,15 @@ package org.apache.hadoop.hbase.chaos.actions; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey; import org.apache.hadoop.hbase.zookeeper.ZKServerTool; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Action that restarts a random zookeeper node. 
*/ public class RestartRandomZKNodeAction extends RestartActionBaseAction { + private static final Logger LOG = LoggerFactory.getLogger(RestartRandomZKNodeAction.class); + public RestartRandomZKNodeAction(long sleepTime) { super(sleepTime); } diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartRsHoldingMetaAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartRsHoldingMetaAction.java index f17b8068f51..aeecf0a891f 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartRsHoldingMetaAction.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartRsHoldingMetaAction.java @@ -20,11 +20,16 @@ package org.apache.hadoop.hbase.chaos.actions; import org.apache.hadoop.hbase.ClusterMetrics; import org.apache.hadoop.hbase.ServerName; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Action that tries to restart the HRegionServer holding Meta. */ public class RestartRsHoldingMetaAction extends RestartActionBaseAction { + private static final Logger LOG = + LoggerFactory.getLogger(RestartRsHoldingMetaAction.class); + public RestartRsHoldingMetaAction(long sleepTime) { super(sleepTime); } diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartRsHoldingTableAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartRsHoldingTableAction.java index 2c9a872cc7e..a63c672db39 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartRsHoldingTableAction.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartRsHoldingTableAction.java @@ -23,11 +23,15 @@ import java.util.List; import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.client.RegionLocator; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Action that restarts an HRegionServer holding one of the regions of the table. 
*/ public class RestartRsHoldingTableAction extends RestartActionBaseAction { + private static final Logger LOG = + LoggerFactory.getLogger(RestartRsHoldingTableAction.class); private final RegionLocator locator; diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RollingBatchRestartRsAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RollingBatchRestartRsAction.java index a5665273211..53f95208343 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RollingBatchRestartRsAction.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RollingBatchRestartRsAction.java @@ -32,8 +32,9 @@ import org.slf4j.LoggerFactory; /** * Restarts a ratio of the regionservers in a rolling fashion. At each step, either kills a - * server, or starts one, sleeping randomly (0-sleepTime) in between steps. The parameter maxDeadServers - * limits the maximum number of servers that can be down at the same time during rolling restarts. + * server, or starts one, sleeping randomly (0-sleepTime) in between steps. + * The parameter maxDeadServers limits the maximum number of servers that + * can be down at the same time during rolling restarts. */ public class RollingBatchRestartRsAction extends BatchRestartRsAction { private static final Logger LOG = LoggerFactory.getLogger(RollingBatchRestartRsAction.class); @@ -81,27 +82,27 @@ public class RollingBatchRestartRsAction extends BatchRestartRsAction { ServerName server; switch (action) { - case KILL: - server = serversToBeKilled.remove(); - try { - killRs(server); - } catch (org.apache.hadoop.util.Shell.ExitCodeException e) { - // We've seen this in test runs where we timeout but the kill went through. HBASE-9743 - // So, add to deadServers even if exception so the start gets called. 
- LOG.info("Problem killing but presume successful; code=" + e.getExitCode(), e); - } - deadServers.add(server); - break; - case START: - try { - server = deadServers.remove(); - startRs(server); - } catch (org.apache.hadoop.util.Shell.ExitCodeException e) { - // The start may fail but better to just keep going though we may lose server. - // - LOG.info("Problem starting, will retry; code=" + e.getExitCode(), e); - } - break; + case KILL: + server = serversToBeKilled.remove(); + try { + killRs(server); + } catch (org.apache.hadoop.util.Shell.ExitCodeException e) { + // We've seen this in test runs where we timeout but the kill went through. HBASE-9743 + // So, add to deadServers even if exception so the start gets called. + LOG.info("Problem killing but presume successful; code=" + e.getExitCode(), e); + } + deadServers.add(server); + break; + case START: + try { + server = deadServers.remove(); + startRs(server); + } catch (org.apache.hadoop.util.Shell.ExitCodeException e) { + // The start may fail but better to just keep going though we may lose server. + // + LOG.info("Problem starting, will retry; code=" + e.getExitCode(), e); + } + break; } sleep(RandomUtils.nextInt(0, (int)sleepTime)); diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/SnapshotTableAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/SnapshotTableAction.java index 4bd9d1d99ca..6ee9b57b8a7 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/SnapshotTableAction.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/SnapshotTableAction.java @@ -21,11 +21,15 @@ package org.apache.hadoop.hbase.chaos.actions; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Action that tries to take a snapshot of a table. 
*/ public class SnapshotTableAction extends Action { + private static final Logger LOG = + LoggerFactory.getLogger(SnapshotTableAction.class); private final TableName tableName; private final long sleepTime; diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/SplitAllRegionOfTableAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/SplitAllRegionOfTableAction.java index a924f40b063..10d54405160 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/SplitAllRegionOfTableAction.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/SplitAllRegionOfTableAction.java @@ -17,15 +17,19 @@ */ package org.apache.hadoop.hbase.chaos.actions; -import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.Admin; - import java.io.IOException; import java.util.concurrent.ThreadLocalRandom; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Admin; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + public class SplitAllRegionOfTableAction extends Action { + private static final Logger LOG = + LoggerFactory.getLogger(SplitAllRegionOfTableAction.class); private static final int DEFAULT_MAX_SPLITS = 3; private static final String MAX_SPLIT_KEY = "hbase.chaosmonkey.action.maxFullTableSplits"; diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/SplitRandomRegionOfTableAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/SplitRandomRegionOfTableAction.java index 7892a0a7066..b4873753bb3 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/SplitRandomRegionOfTableAction.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/SplitRandomRegionOfTableAction.java @@ -19,16 +19,21 @@ package org.apache.hadoop.hbase.chaos.actions; import java.util.List; + import 
org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.RegionInfo; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Action that tries to split a random region of a table. */ public class SplitRandomRegionOfTableAction extends Action { + private static final Logger LOG = + LoggerFactory.getLogger(SplitRandomRegionOfTableAction.class); private final long sleepTime; private final TableName tableName; diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/TruncateTableAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/TruncateTableAction.java index 4903a07fbff..b79dafa14e8 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/TruncateTableAction.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/TruncateTableAction.java @@ -21,13 +21,17 @@ package org.apache.hadoop.hbase.chaos.actions; import java.util.Random; import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Admin; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Action that tries to truncate of a table. 
*/ public class TruncateTableAction extends Action { + private static final Logger LOG = + LoggerFactory.getLogger(TruncateTableAction.class); private final TableName tableName; private final Random random; diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/UnbalanceKillAndRebalanceAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/UnbalanceKillAndRebalanceAction.java index cab5aa25d04..5e5504af10e 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/UnbalanceKillAndRebalanceAction.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/UnbalanceKillAndRebalanceAction.java @@ -23,13 +23,18 @@ import java.util.HashSet; import java.util.LinkedList; import java.util.List; import java.util.Set; + import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.hbase.ClusterMetrics; import org.apache.hadoop.hbase.ServerName; import org.junit.Assert; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** This action is too specific to put in ChaosMonkey; put it here */ public class UnbalanceKillAndRebalanceAction extends Action { + private static final Logger LOG = + LoggerFactory.getLogger(UnbalanceKillAndRebalanceAction.class); /** Fractions of servers to get regions and live and die respectively; from all other * servers, HOARD_FRC_OF_REGIONS will be removed to the above randomly */ private static final double FRC_SERVERS_THAT_HOARD_AND_LIVE = 0.1; diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/UnbalanceRegionsAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/UnbalanceRegionsAction.java index f85ff0482ed..3f2a3a1b518 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/UnbalanceRegionsAction.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/UnbalanceRegionsAction.java @@ -21,14 +21,19 @@ package org.apache.hadoop.hbase.chaos.actions; import java.util.ArrayList; import 
java.util.LinkedList; import java.util.List; + import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.hbase.ClusterMetrics; import org.apache.hadoop.hbase.ServerName; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Action that tries to unbalance the regions of a cluster. */ public class UnbalanceRegionsAction extends Action { + private static final Logger LOG = + LoggerFactory.getLogger(UnbalanceRegionsAction.class); private double fractionOfRegions; private double fractionOfServers;