diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestLazyCfLoading.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestLazyCfLoading.java
index 2b77ad5f588..381aff7d501 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestLazyCfLoading.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestLazyCfLoading.java
@@ -167,7 +167,7 @@ public class IntegrationTestLazyCfLoading {
       scf.setFilterIfMissing(true);
       return scf;
     }
-  };
+  }
 
   @Before
   public void setUp() throws Exception {
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestManyRegions.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestManyRegions.java
index d1587fad1d4..d6bc2e22b30 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestManyRegions.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestManyRegions.java
@@ -85,7 +85,7 @@ public class IntegrationTestManyRegions {
         LOG.info(String.format("Deleting existing table %s.", TABLE_NAME));
         if (admin.isTableEnabled(TABLE_NAME)) admin.disableTable(TABLE_NAME);
         admin.deleteTable(TABLE_NAME);
-        LOG.info(String.format("Existing table %s deleted."));
+        LOG.info(String.format("Existing table %s deleted.", TABLE_NAME));
       }
       LOG.info("Cluster ready");
     }
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestsDriver.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestsDriver.java
index d3f41ad97f0..436bbe1c6d0 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestsDriver.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestsDriver.java
@@ -102,8 +102,8 @@ public class IntegrationTestsDriver extends AbstractHBaseTool {
     IntegrationTestingUtility.setUseDistributedCluster(conf);
     Class<?>[] classes = findIntegrationTestClasses();
     LOG.info("Found " + classes.length + " integration tests to run:");
-    for (int i = 0; i < classes.length; i++) {
-      LOG.info(" " + classes[i]);
+    for (Class<?> aClass : classes) {
+      LOG.info(" " + aClass);
     }
     JUnitCore junit = new JUnitCore();
     junit.addListener(new TextListener(System.out));
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/Action.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/Action.java
index f9c33831cef..6900291e339 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/Action.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/Action.java
@@ -56,7 +56,7 @@ public class Action {
     initialServers = regionServers.toArray(new ServerName[regionServers.size()]);
   }
 
-  public void perform() throws Exception { };
+  public void perform() throws Exception { }
 
   /** Returns current region servers */
   protected ServerName[] getCurrentServers() throws IOException {
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/AddColumnAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/AddColumnAction.java
index 82afed931a5..aa8a35c2d5b 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/AddColumnAction.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/AddColumnAction.java
@@ -31,13 +31,11 @@ import org.apache.hadoop.hbase.util.Bytes;
  * Action the adds a column family to a table.
 */
 public class AddColumnAction extends Action {
-  private final Random random;
   private final byte[] tableName;
   private HBaseAdmin admin;
 
   public AddColumnAction(String tableName) {
     this.tableName = Bytes.toBytes(tableName);
-    this.random = new Random();
   }
 
   @Override
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/CompactTableAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/CompactTableAction.java
index 0fae29b08a7..c1f83d119ed 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/CompactTableAction.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/CompactTableAction.java
@@ -24,8 +24,8 @@ import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.util.Bytes;
 
 /**
-* Created by eclark on 8/12/13.
-*/
+ * Action that queues a table compaction.
+ */
 public class CompactTableAction extends Action {
   private final byte[] tableNameBytes;
   private final int majorRatio;
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RemoveColumnAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RemoveColumnAction.java
index 17e0bd6936f..583dc55f698 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RemoveColumnAction.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RemoveColumnAction.java
@@ -58,7 +58,8 @@ public class RemoveColumnAction extends Action {
     }
 
     int index = random.nextInt(columnDescriptors.length);
-    while(protectedColumns.contains(columnDescriptors[index].getNameAsString())) {
+    while(protectedColumns != null &&
+        protectedColumns.contains(columnDescriptors[index].getNameAsString())) {
       index = random.nextInt(columnDescriptors.length);
     }
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartRandomRsAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartRandomRsAction.java
index 18cdf4ad8fc..7b09dd31051 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartRandomRsAction.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartRandomRsAction.java
@@ -22,8 +22,8 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey;
 
 /**
-* Created by eclark on 8/12/13.
-*/
+ * Action that restarts a random HRegionServer.
+ */
 public class RestartRandomRsAction extends RestartActionBaseAction {
   public RestartRandomRsAction(long sleepTime) {
     super(sleepTime);
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/monkies/ChaosMonkey.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/monkies/ChaosMonkey.java
index a1cbf161e13..da75c3be480 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/monkies/ChaosMonkey.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/monkies/ChaosMonkey.java
@@ -20,6 +20,27 @@ package org.apache.hadoop.hbase.chaos.monkies;
 
 import org.apache.hadoop.hbase.Stoppable;
 
+/**
+ * A utility to inject faults in a running cluster.
+ * <p>
+ * ChaosMonkey defines Actions and Policies. Actions are sequences of events, like:
+ *  - Select a random server to kill
+ *  - Sleep for 5 sec
+ *  - Start the server on the same host
+ * Actions can also be complex events, like rolling restart of all of the servers.
+ * <p>
+ * Policies, on the other hand, are responsible for executing the actions based on a strategy.
+ * The default policy is to execute a random action every minute based on predefined action
+ * weights. ChaosMonkey executes predefined named policies until it is stopped. More than one
+ * policy can be active at any time.
+ * <p>
+ * Chaos monkey can be run from the command line, or can be invoked from integration tests.
+ * See {@link org.apache.hadoop.hbase.IntegrationTestIngest} or other integration tests that use
+ * chaos monkey for code examples.
+ * <p>
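+ * For example, an integration test might drive a monkey roughly like this (an
+ * illustrative sketch only; the exact constructor and policy setup may vary):
+ * <pre>
+ *   ChaosMonkey monkey = new PolicyBasedChaosMonkey(util, policies);
+ *   monkey.start();
+ *   // run the workload under test, then shut the monkey down
+ *   monkey.stop("test finished");
+ * </pre>
+ * <p>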
+ * The ChaosMonkey class is inspired by Netflix's same-named tool:
+ * http://techblog.netflix.com/2012/07/chaos-monkey-released-into-wild.html
+ */
 public abstract class ChaosMonkey implements Stoppable {
   public abstract void start() throws Exception;
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/monkies/PolicyBasedChaosMonkey.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/monkies/PolicyBasedChaosMonkey.java
index 5a0416e6786..e2671103734 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/monkies/PolicyBasedChaosMonkey.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/monkies/PolicyBasedChaosMonkey.java
@@ -30,25 +30,7 @@ import org.apache.hadoop.hbase.chaos.policies.Policy;
 import org.apache.hadoop.hbase.util.Pair;
 
 /**
- * A utility to injects faults in a running cluster.
- * <p>
- * ChaosMonkey defines Action's and Policy's. Actions are sequences of events, like
- *  - Select a random server to kill
- *  - Sleep for 5 sec
- *  - Start the server on the same host
- * Actions can also be complex events, like rolling restart of all of the servers.
- * <p>
- * Policies on the other hand are responsible for executing the actions based on a strategy.
- * The default policy is to execute a random action every minute based on predefined action
- * weights. ChaosMonkey executes predefined named policies until it is stopped. More than one
- * policy can be active at any time.
- * <p>
- * Chaos monkey can be run from the command line, or can be invoked from integration tests.
- * See {@link org.apache.hadoop.hbase.IntegrationTestIngest} or other integration tests that use
- * chaos monkey for code examples.
- * <p>
- * ChaosMonkey class is indeed inspired by the Netflix's same-named tool:
- * http://techblog.netflix.com/2012/07/chaos-monkey-released-into-wild.html
+ * Chaos monkey that, given multiple policies, will run actions against the cluster.
 */
 public class PolicyBasedChaosMonkey extends ChaosMonkey {
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java
index 239f9aa70e2..84c33adac86 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java
@@ -329,10 +329,6 @@ public class IntegrationTestBulkLoad extends IntegrationTestBase {
     private Long order;
 
-    public LinkKey() {
-
-    }
-
     public LinkKey(long chainId, long order) {
       this.chainId = chainId;
       this.order = order;
@@ -373,9 +369,6 @@ public class IntegrationTestBulkLoad extends IntegrationTestBase {
       return rk;
     }
 
-    public LinkChain() {
-    }
-
     public LinkChain(Long rk, Long next) {
       this.rk = rk;
       this.next = next;
@@ -416,8 +409,7 @@ public class IntegrationTestBulkLoad extends IntegrationTestBase {
                             LinkChain linkChain,
                             int numPartitions) {
       int hash = linkKey.getChainId().hashCode();
-      int partition = hash % numPartitions;
-      return partition;
+      return hash % numPartitions;
     }
   }
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestImportTsv.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestImportTsv.java
index 8542840220f..53eda270c2f 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestImportTsv.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestImportTsv.java
@@ -339,7 +339,9 @@ public class IntegrationTestImportTsv implements Configurable, Tool {
       fout.write(Bytes.toBytes("testRunFromOutputCommitter\n"));
       LOG.debug(format("Wrote test data to file: %s", inputPath));
     } finally {
-      fout.close();
+      if (fout != null) {
+        fout.close();
+      }
     }
 
     // create a parent job that ships the HBase dependencies. This is
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java
index da7566e22b1..cb9f31177c5 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java
@@ -445,7 +445,7 @@
         rs = table.getScanner(s);
         Result result = rs.next();
-        return rs != null && result != null && result.size() > 0;
+        return result != null && result.size() > 0;
       } finally {
         if (rs != null) {
           rs.close();
@@ -510,7 +510,6 @@
     @Override
     public Boolean call() throws Exception {
       int colsPerKey = 10;
-      int recordSize = 500;
       int numServers = util.getHBaseClusterInterface().getInitialClusterStatus().getServersSize();
       int numKeys = numServers * 5000;
       int writeThreads = 10;
@@ -521,7 +520,7 @@
       do {
         int ret = loadTool.run(new String[]{
             "-tn", loadTableName.getNameAsString(),
-            "-write", String.format("%d:%d:%d", colsPerKey, recordSize, writeThreads),
+            "-write", String.format("%d:%d:%d", colsPerKey, 500, writeThreads),
             "-num_keys", String.valueOf(numKeys),
             "-skip_init"
         });
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
index 097b1672f4f..9df9ad6e1e3 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
@@ -28,6 +28,7 @@ import java.util.Random;
 import java.util.Set;
 import java.util.UUID;
 
+import com.google.common.collect.Sets;
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.GnuParser;
 import org.apache.commons.cli.HelpFormatter;
@@ -204,10 +205,6 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
     private static final Log LOG = LogFactory.getLog(Generator.class);
 
-    public static enum Counts {
-      UNREFERENCED, UNDEFINED, REFERENCED, CORRUPT
-    }
-
    static class GeneratorInputFormat extends InputFormat<BytesWritable,NullWritable>