HBASE-9253 clean up IT test code.
git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1515207 13f79535-47bb-0310-9956-ffa450edef68
parent 1668952866
commit e91ef96624
@@ -167,7 +167,7 @@ public class IntegrationTestLazyCfLoading {
       scf.setFilterIfMissing(true);
       return scf;
     }
-  };
+  }

   @Before
   public void setUp() throws Exception {
@@ -85,7 +85,7 @@ public class IntegrationTestManyRegions {
       LOG.info(String.format("Deleting existing table %s.", TABLE_NAME));
       if (admin.isTableEnabled(TABLE_NAME)) admin.disableTable(TABLE_NAME);
       admin.deleteTable(TABLE_NAME);
-      LOG.info(String.format("Existing table %s deleted."));
+      LOG.info(String.format("Existing table %s deleted.", TABLE_NAME));
     }
     LOG.info("Cluster ready");
   }
@@ -102,8 +102,8 @@ public class IntegrationTestsDriver extends AbstractHBaseTool {
     IntegrationTestingUtility.setUseDistributedCluster(conf);
     Class<?>[] classes = findIntegrationTestClasses();
     LOG.info("Found " + classes.length + " integration tests to run:");
-    for (int i = 0; i < classes.length; i++) {
-      LOG.info(" " + classes[i]);
+    for (Class<?> aClass : classes) {
+      LOG.info(" " + aClass);
     }
     JUnitCore junit = new JUnitCore();
     junit.addListener(new TextListener(System.out));
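The driver change above swaps the indexed loop for an enhanced for loop before handing the discovered classes to JUnit programmatically. A minimal, self-contained sketch of that JUnitCore/TextListener pattern (ExampleTest is a placeholder, not one of the HBase integration tests):

    import org.junit.Test;
    import org.junit.internal.TextListener;
    import org.junit.runner.JUnitCore;
    import org.junit.runner.Result;

    public class RunTestsSketch {
      // Placeholder standing in for the discovered IntegrationTest* classes.
      public static class ExampleTest {
        @Test
        public void passes() { }
      }

      public static void main(String[] args) {
        Class<?>[] classes = { ExampleTest.class };
        for (Class<?> aClass : classes) {
          System.out.println(" " + aClass);             // mirrors the driver's logging loop
        }
        JUnitCore junit = new JUnitCore();
        junit.addListener(new TextListener(System.out)); // report progress to stdout
        Result result = junit.run(classes);
        System.exit(result.wasSuccessful() ? 0 : 1);
      }
    }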
@@ -56,7 +56,7 @@ public class Action {
     initialServers = regionServers.toArray(new ServerName[regionServers.size()]);
   }

-  public void perform() throws Exception { };
+  public void perform() throws Exception { }

   /** Returns current region servers */
   protected ServerName[] getCurrentServers() throws IOException {
@@ -31,13 +31,11 @@ import org.apache.hadoop.hbase.util.Bytes;
  * Action the adds a column family to a table.
  */
 public class AddColumnAction extends Action {
-  private final Random random;
   private final byte[] tableName;
   private HBaseAdmin admin;

   public AddColumnAction(String tableName) {
     this.tableName = Bytes.toBytes(tableName);
-    this.random = new Random();
   }

   @Override
@@ -24,8 +24,8 @@ import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.util.Bytes;

 /**
- * Created by eclark on 8/12/13.
- */
+ * Action that queues a table compaction.
+ */
 public class CompactTableAction extends Action {
   private final byte[] tableNameBytes;
   private final int majorRatio;
@@ -58,7 +58,8 @@ public class RemoveColumnAction extends Action {
     }

     int index = random.nextInt(columnDescriptors.length);
-    while(protectedColumns.contains(columnDescriptors[index].getNameAsString())) {
+    while(protectedColumns != null &&
+        protectedColumns.contains(columnDescriptors[index].getNameAsString())) {
       index = random.nextInt(columnDescriptors.length);
     }
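The RemoveColumnAction change simply tolerates a null protected-column set. A self-contained sketch of the same null-guarded re-roll loop (method and parameter names here are illustrative, not the HBase ones):

    import java.util.Random;
    import java.util.Set;

    public class PickColumnSketch {
      // Pick a random entry, re-rolling while it lands in the (possibly null) protected set.
      // Callers are expected to guarantee at least one unprotected entry, as in the original loop.
      static String pick(String[] columns, Set<String> protectedColumns, Random random) {
        int index = random.nextInt(columns.length);
        while (protectedColumns != null && protectedColumns.contains(columns[index])) {
          index = random.nextInt(columns.length);
        }
        return columns[index];
      }
    }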
@@ -22,8 +22,8 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey;

 /**
- * Created by eclark on 8/12/13.
- */
+ * Action that restarts a random HRegionServer
+ */
 public class RestartRandomRsAction extends RestartActionBaseAction {
   public RestartRandomRsAction(long sleepTime) {
     super(sleepTime);
@@ -20,6 +20,27 @@ package org.apache.hadoop.hbase.chaos.monkies;

 import org.apache.hadoop.hbase.Stoppable;

+/**
+ * A utility to injects faults in a running cluster.
+ * <p>
+ * ChaosMonkey defines Action's and Policy's. Actions are sequences of events, like
+ * - Select a random server to kill
+ * - Sleep for 5 sec
+ * - Start the server on the same host
+ * Actions can also be complex events, like rolling restart of all of the servers.
+ * <p>
+ * Policies on the other hand are responsible for executing the actions based on a strategy.
+ * The default policy is to execute a random action every minute based on predefined action
+ * weights. ChaosMonkey executes predefined named policies until it is stopped. More than one
+ * policy can be active at any time.
+ * <p>
+ * Chaos monkey can be run from the command line, or can be invoked from integration tests.
+ * See {@link org.apache.hadoop.hbase.IntegrationTestIngest} or other integration tests that use
+ * chaos monkey for code examples.
+ * <p>
+ * ChaosMonkey class is indeed inspired by the Netflix's same-named tool:
+ * http://techblog.netflix.com/2012/07/chaos-monkey-released-into-wild.html
+ */
 public abstract class ChaosMonkey implements Stoppable {
   public abstract void start() throws Exception;

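The javadoc added above describes the Action/Policy split: actions do the damage, a policy decides when to run which action. A rough, self-contained sketch of that idea (the class, field names, and period are illustrative; this is not the PolicyBasedChaosMonkey implementation):

    import java.util.List;
    import java.util.Random;
    import java.util.concurrent.atomic.AtomicBoolean;

    public class PeriodicRandomActionSketch {
      interface Action { void perform() throws Exception; }

      private final List<Action> actions;
      private final long periodMs;
      private final AtomicBoolean stopped = new AtomicBoolean(false);
      private final Random random = new Random();

      PeriodicRandomActionSketch(List<Action> actions, long periodMs) {
        this.actions = actions;
        this.periodMs = periodMs;
      }

      void start() {
        new Thread(() -> {
          while (!stopped.get()) {
            try {
              // pick one action at random, run it, then sleep until the next round
              actions.get(random.nextInt(actions.size())).perform();
              Thread.sleep(periodMs);
            } catch (Exception e) {
              e.printStackTrace();
            }
          }
        }, "chaos-policy").start();
      }

      void stop() { stopped.set(true); }
    }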
@@ -30,25 +30,7 @@ import org.apache.hadoop.hbase.chaos.policies.Policy;
 import org.apache.hadoop.hbase.util.Pair;

 /**
- * A utility to injects faults in a running cluster.
- * <p>
- * ChaosMonkey defines Action's and Policy's. Actions are sequences of events, like
- * - Select a random server to kill
- * - Sleep for 5 sec
- * - Start the server on the same host
- * Actions can also be complex events, like rolling restart of all of the servers.
- * <p>
- * Policies on the other hand are responsible for executing the actions based on a strategy.
- * The default policy is to execute a random action every minute based on predefined action
- * weights. ChaosMonkey executes predefined named policies until it is stopped. More than one
- * policy can be active at any time.
- * <p>
- * Chaos monkey can be run from the command line, or can be invoked from integration tests.
- * See {@link org.apache.hadoop.hbase.IntegrationTestIngest} or other integration tests that use
- * chaos monkey for code examples.
- * <p>
- * ChaosMonkey class is indeed inspired by the Netflix's same-named tool:
- * http://techblog.netflix.com/2012/07/chaos-monkey-released-into-wild.html
+ * Chaos monkey that given multiple policies will run actions against the cluster.
  */
 public class PolicyBasedChaosMonkey extends ChaosMonkey {

@@ -329,10 +329,6 @@ public class IntegrationTestBulkLoad extends IntegrationTestBase {

     private Long order;

-    public LinkKey() {
-
-    }
-
     public LinkKey(long chainId, long order) {
       this.chainId = chainId;
       this.order = order;
@@ -373,9 +369,6 @@ public class IntegrationTestBulkLoad extends IntegrationTestBase {
       return rk;
     }

-    public LinkChain() {
-    }
-
     public LinkChain(Long rk, Long next) {
       this.rk = rk;
       this.next = next;
@@ -416,8 +409,7 @@ public class IntegrationTestBulkLoad extends IntegrationTestBase {
                             LinkChain linkChain,
                             int numPartitions) {
       int hash = linkKey.getChainId().hashCode();
-      int partition = hash % numPartitions;
-      return partition;
+      return hash % numPartitions;
     }
   }

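The simplified method above implements the MapReduce Partitioner contract for the chain keys: hash the chain id and take it modulo the number of reducers. A generic sketch of that contract (the key/value types are illustrative, and the sign mask is an extra guard against negative hash codes, not part of the commit):

    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Partitioner;

    // Route each record to a reducer by a hash of its key; the mask keeps the
    // result in [0, numPartitions) even if hashCode() is negative.
    public class HashingPartitionerSketch extends Partitioner<LongWritable, Text> {
      @Override
      public int getPartition(LongWritable key, Text value, int numPartitions) {
        int hash = key.hashCode();
        return (hash & Integer.MAX_VALUE) % numPartitions;
      }
    }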
@@ -339,7 +339,9 @@ public class IntegrationTestImportTsv implements Configurable, Tool {
       fout.write(Bytes.toBytes("testRunFromOutputCommitter\n"));
       LOG.debug(format("Wrote test data to file: %s", inputPath));
     } finally {
-      fout.close();
+      if (fout != null) {
+        fout.close();
+      }
     }

     // create a parent job that ships the HBase dependencies. This is
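The ImportTsv fix guards the close against fout still being null when the earlier create call throws. A minimal sketch of the idiom with plain java.nio (the path and data are illustrative):

    import java.io.IOException;
    import java.io.OutputStream;
    import java.nio.file.Files;
    import java.nio.file.Path;

    public class SafeCloseSketch {
      static void writeMarker(Path path, byte[] data) throws IOException {
        OutputStream out = null;
        try {
          out = Files.newOutputStream(path); // may throw before out is assigned
          out.write(data);
        } finally {
          if (out != null) {                 // only close if the stream was actually opened
            out.close();
          }
        }
      }
    }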
@@ -445,7 +445,7 @@ public class IntegrationTestMTTR {

         rs = table.getScanner(s);
         Result result = rs.next();
-        return rs != null && result != null && result.size() > 0;
+        return result != null && result.size() > 0;
       } finally {
         if (rs != null) {
           rs.close();
@@ -510,7 +510,6 @@ public class IntegrationTestMTTR {
       @Override
       public Boolean call() throws Exception {
         int colsPerKey = 10;
-        int recordSize = 500;
         int numServers = util.getHBaseClusterInterface().getInitialClusterStatus().getServersSize();
         int numKeys = numServers * 5000;
         int writeThreads = 10;
@@ -521,7 +520,7 @@ public class IntegrationTestMTTR {
         do {
           int ret = loadTool.run(new String[]{
               "-tn", loadTableName.getNameAsString(),
-              "-write", String.format("%d:%d:%d", colsPerKey, recordSize, writeThreads),
+              "-write", String.format("%d:%d:%d", colsPerKey, 500, writeThreads),
               "-num_keys", String.valueOf(numKeys),
               "-skip_init"
           });
@@ -28,6 +28,7 @@ import java.util.Random;
 import java.util.Set;
 import java.util.UUID;

+import com.google.common.collect.Sets;
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.GnuParser;
 import org.apache.commons.cli.HelpFormatter;
@@ -204,10 +205,6 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {

     private static final Log LOG = LogFactory.getLog(Generator.class);

-    public static enum Counts {
-      UNREFERENCED, UNDEFINED, REFERENCED, CORRUPT
-    }
-
     static class GeneratorInputFormat extends InputFormat<BytesWritable,NullWritable> {
       static class GeneratorInputSplit extends InputSplit implements Writable {
         @Override
@@ -322,7 +319,6 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
      */
     static class GeneratorMapper
       extends Mapper<BytesWritable, NullWritable, NullWritable, NullWritable> {
-      Random rand = new Random();

       byte[][] first = null;
       byte[][] prev = null;
@@ -350,11 +346,11 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
         if (this.numNodes < this.wrap) {
           this.wrap = this.numNodes;
         }
-      };
+      }

       protected void cleanup(Context context) throws IOException ,InterruptedException {
         table.close();
-      };
+      }

       @Override
       protected void map(BytesWritable key, NullWritable value, Context output) throws IOException {
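The stray semicolons after the overridden method bodies are dropped here. For reference, a minimal Mapper with setup/cleanup overridden in the usual way (the types and comments are illustrative, not the HBase generator):

    import java.io.IOException;
    import org.apache.hadoop.io.BytesWritable;
    import org.apache.hadoop.io.NullWritable;
    import org.apache.hadoop.mapreduce.Mapper;

    public class SketchMapper extends Mapper<BytesWritable, NullWritable, NullWritable, NullWritable> {
      @Override
      protected void setup(Context context) throws IOException, InterruptedException {
        // acquire per-task resources here
      }

      @Override
      protected void cleanup(Context context) throws IOException, InterruptedException {
        // release per-task resources here; no semicolon after the closing brace
      }

      @Override
      protected void map(BytesWritable key, NullWritable value, Context context)
          throws IOException, InterruptedException {
        // per-record work
      }
    }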
@@ -593,7 +589,9 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
             context.getCounter(Counts.UNREFERENCED).increment(1);
           } else {
             if (refs.size() > 1) {
-              context.write(new Text(keyString), new Text(refsSb.toString()));
+              if (refsSb != null) {
+                context.write(new Text(keyString), new Text(refsSb.toString()));
+              }
               context.getCounter(Counts.EXTRAREFERENCES).increment(refs.size() - 1);
             }
             // node is defined and referenced
@@ -1067,18 +1065,18 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {

   @Override
   protected Set<String> getColumnFamilies() {
-    return null;
+    return Sets.newHashSet(Bytes.toString(FAMILY_NAME));
   }

   private static void setJobConf(Job job, int numMappers, long numNodes,
-      Integer width, Integer wrapMuplitplier) {
+      Integer width, Integer wrapMultiplier) {
     job.getConfiguration().setInt(GENERATOR_NUM_MAPPERS_KEY, numMappers);
     job.getConfiguration().setLong(GENERATOR_NUM_ROWS_PER_MAP_KEY, numNodes);
     if (width != null) {
-      job.getConfiguration().setInt(GENERATOR_WIDTH_KEY, width.intValue());
+      job.getConfiguration().setInt(GENERATOR_WIDTH_KEY, width);
     }
-    if (wrapMuplitplier != null) {
-      job.getConfiguration().setInt(GENERATOR_WRAP_KEY, wrapMuplitplier.intValue());
+    if (wrapMultiplier != null) {
+      job.getConfiguration().setInt(GENERATOR_WRAP_KEY, wrapMultiplier);
     }
   }

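getColumnFamilies now reports the actual family instead of null, using Guava's Sets.newHashSet. A tiny sketch of that helper shape (the family name is illustrative):

    import java.util.Set;
    import com.google.common.collect.Sets;

    public class FamiliesSketch {
      // Return the column families the test touches; callers can use this for cleanup.
      static Set<String> getColumnFamilies() {
        return Sets.newHashSet("meta");  // illustrative family name
      }
    }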
@@ -24,7 +24,6 @@ import java.util.UUID;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;

-import com.google.common.collect.Lists;
 import com.google.common.collect.Sets;
 import org.apache.commons.cli.CommandLine;
 import org.apache.hadoop.conf.Configuration;
@@ -114,7 +113,7 @@ public class IntegrationTestLoadAndVerify extends IntegrationTestBase {
   private enum Counters {
     ROWS_WRITTEN,
     REFERENCES_WRITTEN,
-    REFERENCES_CHECKED;
+    REFERENCES_CHECKED
   }

   @Before
@@ -122,9 +121,11 @@ public class IntegrationTestLoadAndVerify extends IntegrationTestBase {
     util = getTestingUtil(getConf());
     util.initializeCluster(3);
     this.setConf(util.getConfiguration());
-    getConf().setLong(NUM_TO_WRITE_KEY, NUM_TO_WRITE_DEFAULT / 100);
-    getConf().setInt(NUM_MAP_TASKS_KEY, NUM_MAP_TASKS_DEFAULT / 100);
-    getConf().setInt(NUM_REDUCE_TASKS_KEY, NUM_REDUCE_TASKS_DEFAULT / 10);
+    if (!util.isDistributedCluster()) {
+      getConf().setLong(NUM_TO_WRITE_KEY, NUM_TO_WRITE_DEFAULT / 100);
+      getConf().setInt(NUM_MAP_TASKS_KEY, NUM_MAP_TASKS_DEFAULT / 100);
+      getConf().setInt(NUM_REDUCE_TASKS_KEY, NUM_REDUCE_TASKS_DEFAULT / 10);
+    }
   }

   /**
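The setUp change only shrinks the workload when the test is not pointed at a real distributed cluster, so the full-size defaults survive in the distributed case. A hedged sketch of the same guard using a plain Hadoop Configuration (the keys, values, and the boolean flag are illustrative):

    import org.apache.hadoop.conf.Configuration;

    public class ScaleDownSketch {
      // Shrink the workload only for a local minicluster run; leave the defaults
      // (and any command-line overrides) alone when running distributed.
      static void maybeScaleDown(Configuration conf, boolean distributedCluster) {
        if (!distributedCluster) {
          conf.setLong("sketch.num.to.write", 1000L);
          conf.setInt("sketch.num.map.tasks", 2);
          conf.setInt("sketch.num.reduce.tasks", 3);
        }
      }
    }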
@@ -360,8 +361,7 @@ public class IntegrationTestLoadAndVerify extends IntegrationTestBase {
     htd.addFamily(new HColumnDescriptor(TEST_FAMILY));

     HBaseAdmin admin = getTestingUtil(getConf()).getHBaseAdmin();
-    int numPreCreate = 40;
-    admin.createTable(htd, Bytes.toBytes(0L), Bytes.toBytes(-1L), numPreCreate);
+    admin.createTable(htd, Bytes.toBytes(0L), Bytes.toBytes(-1L), 40);

     doLoad(getConf(), htd);
     doVerify(getConf(), htd);