HBASE-410 [testing] Speed up the test suite; Moved TestHBaseCluster, TestMasterAdmin and TestForceSplit into TestAdmin
git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@825762 13f79535-47bb-0310-9956-ffa450edef68
parent 3a591a989c
commit a54c987f0c
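For context: the deleted classes below each extend HBaseClusterTestCase, which spins up a fresh minicluster per test class; the consolidated TestAdmin instead shares one minicluster across all of its test methods, which is where the speedup comes from. A minimal sketch of that pattern, assuming only the HBaseTestingUtility start/shutdown calls that TestAdmin already relies on in this commit (the class and method names here are illustrative, not from the commit):

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;

public class SharedClusterSketch {
  // One utility, and therefore one minicluster, for the whole class.
  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    TEST_UTIL.startMiniCluster();      // cluster startup cost paid once...
  }

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    TEST_UTIL.shutdownMiniCluster();   // ...and shutdown cost paid once
  }

  @Test
  public void someConsolidatedTest() throws Exception {
    // every @Test method runs against the shared cluster
  }
}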
@@ -1,216 +0,0 @@
/**
 * Copyright 2009 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase;

import java.io.IOException;
import java.util.Collection;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.util.Bytes;

/**
 * Test HBase Master and Region servers, client API
 */
public class TestHBaseCluster extends HBaseClusterTestCase {
  private static final Log LOG = LogFactory.getLog(TestHBaseCluster.class);

  private HTableDescriptor desc;
  private HBaseAdmin admin;
  private HTable table;

  /** constructor */
  public TestHBaseCluster() {
    super();
    this.desc = null;
    this.admin = null;
    this.table = null;

    // Make the thread wake frequency a little slower so other threads
    // can run
    conf.setInt("hbase.server.thread.wakefrequency", 2000);

    // Make lease timeout longer, lease checks less frequent
    conf.setInt("hbase.master.lease.period", 10 * 1000);

    // Increase the amount of time between client retries
    conf.setLong("hbase.client.pause", 15 * 1000);
  }

  /**
   * Since all the "tests" depend on the results of the previous test, they are
   * not Junit tests that can stand alone. Consequently we have a single Junit
   * test that runs the "sub-tests" as private methods.
   * @throws IOException
   */
  public void testHBaseCluster() throws IOException {
    setup();
    basic();
    scanner();
    listTables();
  }

  private static final int FIRST_ROW = 1;
  private static final int NUM_VALS = 1000;
  private static final byte [] CONTENTS_CF = Bytes.toBytes("contents");
  private static final String CONTENTS_CQ_STR = "basic";
  private static final byte [] CONTENTS_CQ = Bytes.toBytes(CONTENTS_CQ_STR);
  private static final String CONTENTSTR = "contentstr";
  //
  private static final byte [] ANCHOR_CF = Bytes.toBytes("anchor");
  private static final String ANCHORNUM_CQ = "anchornum-";
  private static final String ANCHORSTR_VALUE = "anchorstr";

  private void setup() throws IOException {
    desc = new HTableDescriptor("test");
    desc.addFamily(new HColumnDescriptor(CONTENTS_CF));
    desc.addFamily(new HColumnDescriptor(ANCHOR_CF));
    admin = new HBaseAdmin(conf);
    admin.createTable(desc);
    table = new HTable(conf, desc.getName());
  }

  // Test basic functionality. Writes to contents:basic and anchor:anchornum-*

  private void basic() throws IOException {
    long startTime = System.currentTimeMillis();

    // Write out a bunch of values

    for (int k = FIRST_ROW; k <= NUM_VALS; k++) {
      Put put = new Put(Bytes.toBytes("row_" + k));
      put.add(CONTENTS_CF, CONTENTS_CQ, Bytes.toBytes(CONTENTSTR + k));
      put.add(ANCHOR_CF, Bytes.toBytes(ANCHORNUM_CQ + k), Bytes.toBytes(ANCHORSTR_VALUE + k));
      table.put(put);
    }
    LOG.info("Write " + NUM_VALS + " rows. Elapsed time: "
        + ((System.currentTimeMillis() - startTime) / 1000.0));

    // Read them back in

    startTime = System.currentTimeMillis();

    byte [] collabel = null;
    for (int k = FIRST_ROW; k <= NUM_VALS; k++) {
      String rowlabelStr = "row_" + k;
      byte [] rowlabel = Bytes.toBytes(rowlabelStr);

      Get get = new Get(rowlabel);
      get.addColumn(CONTENTS_CF, CONTENTS_CQ);
      byte [] bodydata = table.get(get).getValue(CONTENTS_CF, CONTENTS_CQ);
      assertNotNull("no data for row " + rowlabelStr + "/" + CONTENTS_CQ_STR,
        bodydata);
      String bodystr = new String(bodydata, HConstants.UTF8_ENCODING);
      String teststr = CONTENTSTR + k;
      assertTrue("Incorrect value for key: (" + rowlabelStr + "/" +
        CONTENTS_CQ_STR + "), expected: '" + teststr + "' got: '" +
        bodystr + "'", teststr.compareTo(bodystr) == 0);

      String collabelStr = ANCHORNUM_CQ + k;
      collabel = Bytes.toBytes(collabelStr);

      get = new Get(rowlabel);
      get.addColumn(ANCHOR_CF, collabel);

      bodydata = table.get(get).getValue(ANCHOR_CF, collabel);
      assertNotNull("no data for row " + rowlabelStr + "/" + collabelStr, bodydata);
      bodystr = new String(bodydata, HConstants.UTF8_ENCODING);
      teststr = ANCHORSTR_VALUE + k;
      assertTrue("Incorrect value for key: (" + rowlabelStr + "/" + collabelStr +
        "), expected: '" + teststr + "' got: '" + bodystr + "'",
        teststr.compareTo(bodystr) == 0);
    }

    LOG.info("Read " + NUM_VALS + " rows. Elapsed time: "
        + ((System.currentTimeMillis() - startTime) / 1000.0));
  }

  private void scanner() throws IOException {

    long startTime = System.currentTimeMillis();

    Scan scan = new Scan();
    scan.addFamily(ANCHOR_CF);
    scan.addColumn(CONTENTS_CF, CONTENTS_CQ);
    ResultScanner s = table.getScanner(scan);
    try {

      int contentsFetched = 0;
      int anchorFetched = 0;
      int k = 0;
      for (Result curVals : s) {
        for (KeyValue kv : curVals.raw()) {
          byte [] family = kv.getFamily();
          byte [] qualifier = kv.getQualifier();
          String strValue = new String(kv.getValue());
          if (Bytes.equals(family, CONTENTS_CF)) {
            assertTrue("Error at:" + Bytes.toString(curVals.getRow())
                + ", Value for " + Bytes.toString(qualifier) + " should start with: " + CONTENTSTR
                + ", but was fetched as: " + strValue,
                strValue.startsWith(CONTENTSTR));
            contentsFetched++;

          } else if (Bytes.equals(family, ANCHOR_CF)) {
            assertTrue("Error at:" + Bytes.toString(curVals.getRow())
                + ", Value for " + Bytes.toString(qualifier) + " should start with: " + ANCHORSTR_VALUE
                + ", but was fetched as: " + strValue,
                strValue.startsWith(ANCHORSTR_VALUE));
            anchorFetched++;

          } else {
            LOG.info("Family: " + Bytes.toString(family) + ", Qualifier: " + Bytes.toString(qualifier));
          }
        }
        k++;
      }
      assertEquals("Expected " + NUM_VALS + " " +
        Bytes.toString(CONTENTS_CQ) + " values, but fetched " +
        contentsFetched,
        NUM_VALS, contentsFetched);
      assertEquals("Expected " + NUM_VALS + " " + ANCHORNUM_CQ +
        " values, but fetched " + anchorFetched,
        NUM_VALS, anchorFetched);

      LOG.info("Scanned " + NUM_VALS
        + " rows. Elapsed time: "
        + ((System.currentTimeMillis() - startTime) / 1000.0));

    } finally {
      s.close();
    }
  }

  private void listTables() throws IOException {
    HTableDescriptor[] tables = admin.listTables();
    assertEquals(1, tables.length);
    assertTrue(Bytes.equals(desc.getName(), tables[0].getName()));
    Collection<HColumnDescriptor> families = tables[0].getFamilies();
    assertEquals(2, families.size());
    assertTrue(tables[0].hasFamily(CONTENTS_CF));
    assertTrue(tables[0].hasFamily(ANCHOR_CF));
  }
}
@@ -1,86 +0,0 @@
/**
 * Copyright 2007 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.util.Bytes;

/** tests administrative functions */
public class TestMasterAdmin extends HBaseClusterTestCase {
  private final Log LOG = LogFactory.getLog(this.getClass().getName());

  private static final byte [] FAMILY_NAME = Bytes.toBytes("col1");
  private static HTableDescriptor testDesc;
  static {
    testDesc = new HTableDescriptor("testadmin1");
    testDesc.addFamily(new HColumnDescriptor(FAMILY_NAME));
  }

  private HBaseAdmin admin;

  /** constructor */
  public TestMasterAdmin() {
    super();
    admin = null;

    // Make the thread wake frequency a little slower so other threads
    // can run
    conf.setInt("hbase.server.thread.wakefrequency", 2000);
  }

  /** @throws Exception */
  public void testMasterAdmin() throws Exception {
    admin = new HBaseAdmin(conf);
    // Add test that exception is thrown if descriptor is without a table name.
    // HADOOP-2156.
    boolean exception = false;
    try {
      admin.createTable(new HTableDescriptor());
    } catch (IllegalArgumentException e) {
      exception = true;
    }
    assertTrue(exception);
    admin.createTable(testDesc);
    LOG.info("Table " + testDesc.getNameAsString() + " created");
    admin.disableTable(testDesc.getName());
    LOG.info("Table " + testDesc.getNameAsString() + " disabled");
    try {
      @SuppressWarnings("unused")
      HTable table = new HTable(conf, testDesc.getName());
    } catch (org.apache.hadoop.hbase.client.RegionOfflineException e) {
      // Expected
    }

    admin.addColumn(testDesc.getName(), new HColumnDescriptor("col2"));
    admin.enableTable(testDesc.getName());
    try {
      admin.deleteColumn(testDesc.getName(), Bytes.toBytes("col2"));
    } catch (TableNotDisabledException e) {
      // Expected
    }

    admin.disableTable(testDesc.getName());
    admin.deleteColumn(testDesc.getName(), Bytes.toBytes("col2"));
    admin.deleteTable(testDesc.getName());
  }
}
@@ -230,4 +230,4 @@ public class TestRegionRebalancing extends HBaseClusterTestCase {
    region.getLog().closeAndDelete();
    return region;
  }
}
}
@@ -195,4 +195,4 @@ public class TestScanMultipleVersions extends HBaseClusterTestCase {
      s.close();
    }
  }
}
}
@@ -1,182 +0,0 @@
/**
 * Copyright 2007 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase;

import java.io.IOException;
import java.util.concurrent.atomic.AtomicInteger;

import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

/** Tests table creation restrictions */
public class TestTable extends HBaseClusterTestCase {
  /**
   * the test
   * @throws IOException
   */
  public void testCreateTable() throws IOException {
    final HBaseAdmin admin = new HBaseAdmin(conf);
    String msg = null;
    try {
      admin.createTable(HTableDescriptor.ROOT_TABLEDESC);
    } catch (IllegalArgumentException e) {
      msg = e.toString();
    }
    assertTrue("Unexpected exception message " + msg, msg != null &&
      msg.startsWith(IllegalArgumentException.class.getName()) &&
      msg.contains(HTableDescriptor.ROOT_TABLEDESC.getNameAsString()));

    msg = null;
    try {
      admin.createTable(HTableDescriptor.META_TABLEDESC);
    } catch (IllegalArgumentException e) {
      msg = e.toString();
    }
    assertTrue("Unexpected exception message " + msg, msg != null &&
      msg.startsWith(IllegalArgumentException.class.getName()) &&
      msg.contains(HTableDescriptor.META_TABLEDESC.getNameAsString()));

    // Try doing a duplicate database create.
    msg = null;
    HTableDescriptor desc = new HTableDescriptor(getName());
    desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
    admin.createTable(desc);
    assertTrue("First table creation completed", admin.listTables().length == 1);
    boolean gotException = false;
    try {
      admin.createTable(desc);
    } catch (TableExistsException e) {
      gotException = true;
      msg = e.getMessage();
    }
    assertTrue("Didn't get a TableExistsException!", gotException);
    assertTrue("Unexpected exception message " + msg, msg != null &&
      msg.contains(getName()));

    // Now try and do concurrent creation with a bunch of threads.
    final HTableDescriptor threadDesc =
      new HTableDescriptor("threaded_" + getName());
    threadDesc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
    int count = 10;
    Thread [] threads = new Thread [count];
    final AtomicInteger successes = new AtomicInteger(0);
    final AtomicInteger failures = new AtomicInteger(0);
    for (int i = 0; i < count; i++) {
      threads[i] = new Thread(Integer.toString(i)) {
        @Override
        public void run() {
          try {
            admin.createTable(threadDesc);
            successes.incrementAndGet();
          } catch (TableExistsException e) {
            failures.incrementAndGet();
          } catch (IOException e) {
            System.out.println("Got an IOException... " + e);
            fail();
          }
        }
      };
    }
    for (int i = 0; i < count; i++) {
      threads[i].start();
    }
    for (int i = 0; i < count; i++) {
      while (threads[i].isAlive()) {
        try {
          Thread.sleep(1000);
        } catch (InterruptedException e) {
          // continue
        }
      }
    }
    // All threads are now dead.  Count up how many tables were created and
    // how many failed w/ appropriate exception.
    assertEquals(1, successes.get());
    assertEquals(count - 1, failures.get());
  }

  /**
   * Test for hadoop-1581 'HBASE: Unopenable tablename bug'.
   * @throws Exception
   */
  public void testTableNameClash() throws Exception {
    HBaseAdmin admin = new HBaseAdmin(conf);
    admin.createTable(new HTableDescriptor(getName() + "SOMEUPPERCASE"));
    admin.createTable(new HTableDescriptor(getName()));
    // Before fix, below would fail throwing a NoServerForRegionException.
    @SuppressWarnings("unused")
    HTable table = new HTable(conf, getName());
  }

  /**
   * Test read only tables
   * @throws Exception
   */
  public void testReadOnlyTable() throws Exception {
    HBaseAdmin admin = new HBaseAdmin(conf);
    HTableDescriptor desc = new HTableDescriptor(getName());
    byte[] colName = Bytes.toBytes("test");
    desc.addFamily(new HColumnDescriptor(colName));
    desc.setReadOnly(true);
    admin.createTable(desc);
    HTable table = new HTable(conf, getName());
    try {
      byte[] value = Bytes.toBytes("somedata");
      // This used to use an empty row... That must have been a bug
      Put put = new Put(value);
      byte [][] famAndQf = KeyValue.parseColumn(colName);
      put.add(famAndQf[0], famAndQf[1], value);
      table.put(put);
      fail("Put on read only table succeeded");
    } catch (Exception e) {
      // expected
    }
  }

  /**
   * Test that user table names can contain '-' and '.' so long as they do not
   * start with same.  HBASE-771
   */
  public void testTableNames() {
    byte[][] illegalNames = new byte[][] {
        Bytes.toBytes("-bad"),
        Bytes.toBytes(".bad"),
        HConstants.ROOT_TABLE_NAME,
        HConstants.META_TABLE_NAME
    };
    for (int i = 0; i < illegalNames.length; i++) {
      try {
        new HTableDescriptor(illegalNames[i]);
        fail("Did not detect '" + Bytes.toString(illegalNames[i]) +
          "' as an illegal user table name");
      } catch (IllegalArgumentException e) {
        // expected
      }
    }
    byte[] legalName = Bytes.toBytes("g-oo.d");
    try {
      new HTableDescriptor(legalName);
    } catch (IllegalArgumentException e) {
      fail("Legal user table name: '" + Bytes.toString(legalName) +
        "' caused IllegalArgumentException: " + e.getMessage());
    }
  }
}
@@ -21,23 +21,34 @@ package org.apache.hadoop.hbase.client;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HServerAddress;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.TableExistsException;
import org.apache.hadoop.hbase.TableNotDisabledException;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;

/**
 * Class to test HBaseAdmin.
 * Spins up the minicluster once at test start and then takes it down afterward.
 * Add any testing of HBaseAdmin functionality here.
 */
public class TestAdmin {
  final Log LOG = LogFactory.getLog(getClass());
@@ -113,4 +124,267 @@
    exist = this.admin.tableExists(table);
    assertEquals(true, exist);
  }

  /**
   * Tests forcing split from client and having scanners successfully ride over split.
   * @throws Exception
   * @throws IOException
   */
  @Test
  public void testForceSplit() throws Exception {
    byte [] familyName = HConstants.CATALOG_FAMILY;
    byte [] tableName = Bytes.toBytes("testForceSplit");
    final HTable table = TEST_UTIL.createTable(tableName, familyName);
    byte[] k = new byte[3];
    int rowCount = 0;
    for (byte b1 = 'a'; b1 < 'z'; b1++) {
      for (byte b2 = 'a'; b2 < 'z'; b2++) {
        for (byte b3 = 'a'; b3 < 'z'; b3++) {
          k[0] = b1;
          k[1] = b2;
          k[2] = b3;
          Put put = new Put(k);
          put.add(familyName, new byte[0], k);
          table.put(put);
          rowCount++;
        }
      }
    }

    // get the initial layout (should just be one region)
    Map<HRegionInfo, HServerAddress> m = table.getRegionsInfo();
    System.out.println("Initial regions (" + m.size() + "): " + m);
    assertTrue(m.size() == 1);

    // Verify row count
    Scan scan = new Scan();
    ResultScanner scanner = table.getScanner(scan);
    int rows = 0;
    for (@SuppressWarnings("unused") Result result : scanner) {
      rows++;
    }
    scanner.close();
    assertEquals(rowCount, rows);

    // Have an outstanding scan going on to make sure we can scan over splits.
    scan = new Scan();
    scanner = table.getScanner(scan);
    // Scan first row so we are into first region before split happens.
    scanner.next();

    final AtomicInteger count = new AtomicInteger(0);
    Thread t = new Thread("CheckForSplit") {
      public void run() {
        for (int i = 0; i < 20; i++) {
          try {
            sleep(1000);
          } catch (InterruptedException e) {
            continue;
          }
          // check again    table = new HTable(conf, tableName);
          Map<HRegionInfo, HServerAddress> regions = null;
          try {
            regions = table.getRegionsInfo();
          } catch (IOException e) {
            e.printStackTrace();
          }
          if (regions == null) continue;
          count.set(regions.size());
          if (count.get() >= 2) break;
          LOG.debug("Cycle waiting on split");
        }
      }
    };
    t.start();
    // tell the master to split the table
    admin.split(Bytes.toString(tableName));
    t.join();

    // Verify row count
    rows = 1; // We counted one row above.
    for (@SuppressWarnings("unused") Result result : scanner) {
      rows++;
      if (rows > rowCount) {
        scanner.close();
        assertTrue("Scanned more than expected (" + rowCount + ")", false);
      }
    }
    scanner.close();
    assertEquals(rowCount, rows);
  }

  /**
   * HADOOP-2156
   * @throws IOException
   */
  @Test (expected=IllegalArgumentException.class)
  public void testEmptyHTableDescriptor() throws IOException {
    this.admin.createTable(new HTableDescriptor());
  }

  @Test
  public void testEnableDisableAddColumnDeleteColumn() throws Exception {
    byte [] tableName = Bytes.toBytes("testMasterAdmin");
    TEST_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY);
    this.admin.disableTable(tableName);
    try {
      new HTable(TEST_UTIL.getConfiguration(), tableName);
    } catch (org.apache.hadoop.hbase.client.RegionOfflineException e) {
      // Expected
    }
    this.admin.addColumn(tableName, new HColumnDescriptor("col2"));
    this.admin.enableTable(tableName);
    try {
      this.admin.deleteColumn(tableName, Bytes.toBytes("col2"));
    } catch (TableNotDisabledException e) {
      // Expected
    }
    this.admin.disableTable(tableName);
    this.admin.deleteColumn(tableName, Bytes.toBytes("col2"));
    this.admin.deleteTable(tableName);
  }

  @Test
  public void testCreateBadTables() throws IOException {
    String msg = null;
    try {
      this.admin.createTable(HTableDescriptor.ROOT_TABLEDESC);
    } catch (IllegalArgumentException e) {
      msg = e.toString();
    }
    assertTrue("Unexpected exception message " + msg, msg != null &&
      msg.startsWith(IllegalArgumentException.class.getName()) &&
      msg.contains(HTableDescriptor.ROOT_TABLEDESC.getNameAsString()));
    msg = null;
    try {
      this.admin.createTable(HTableDescriptor.META_TABLEDESC);
    } catch (IllegalArgumentException e) {
      msg = e.toString();
    }
    assertTrue("Unexpected exception message " + msg, msg != null &&
      msg.startsWith(IllegalArgumentException.class.getName()) &&
      msg.contains(HTableDescriptor.META_TABLEDESC.getNameAsString()));

    // Now try and do concurrent creation with a bunch of threads.
    final HTableDescriptor threadDesc =
      new HTableDescriptor("threaded_testCreateBadTables");
    threadDesc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
    int count = 10;
    Thread [] threads = new Thread [count];
    final AtomicInteger successes = new AtomicInteger(0);
    final AtomicInteger failures = new AtomicInteger(0);
    final HBaseAdmin localAdmin = this.admin;
    for (int i = 0; i < count; i++) {
      threads[i] = new Thread(Integer.toString(i)) {
        @Override
        public void run() {
          try {
            localAdmin.createTable(threadDesc);
            successes.incrementAndGet();
          } catch (TableExistsException e) {
            failures.incrementAndGet();
          } catch (IOException e) {
            throw new RuntimeException("Failed threaded create " + getName(), e);
          }
        }
      };
    }
    for (int i = 0; i < count; i++) {
      threads[i].start();
    }
    for (int i = 0; i < count; i++) {
      while (threads[i].isAlive()) {
        try {
          Thread.sleep(1000);
        } catch (InterruptedException e) {
          // continue
        }
      }
    }
    // All threads are now dead.  Count up how many tables were created and
    // how many failed w/ appropriate exception.
    assertEquals(1, successes.get());
    assertEquals(count - 1, failures.get());
  }

  /**
   * Test for hadoop-1581 'HBASE: Unopenable tablename bug'.
   * @throws Exception
   */
  @Test
  public void testTableNameClash() throws Exception {
    String name = "testTableNameClash";
    admin.createTable(new HTableDescriptor(name + "SOMEUPPERCASE"));
    admin.createTable(new HTableDescriptor(name));
    // Before fix, below would fail throwing a NoServerForRegionException.
    new HTable(TEST_UTIL.getConfiguration(), name);
  }

  /**
   * Test read only tables
   * @throws Exception
   */
  @Test
  public void testReadOnlyTable() throws Exception {
    byte [] name = Bytes.toBytes("testReadOnlyTable");
    HTable table = TEST_UTIL.createTable(name, HConstants.CATALOG_FAMILY);
    byte[] value = Bytes.toBytes("somedata");
    // This used to use an empty row... That must have been a bug
    Put put = new Put(value);
    put.add(HConstants.CATALOG_FAMILY, HConstants.CATALOG_FAMILY, value);
    table.put(put);
  }

  /**
   * Test that user table names can contain '-' and '.' so long as they do not
   * start with same.  HBASE-771
   * @throws IOException
   */
  @Test
  public void testTableNames() throws IOException {
    byte[][] illegalNames = new byte[][] {
        Bytes.toBytes("-bad"),
        Bytes.toBytes(".bad"),
        HConstants.ROOT_TABLE_NAME,
        HConstants.META_TABLE_NAME
    };
    for (int i = 0; i < illegalNames.length; i++) {
      try {
        new HTableDescriptor(illegalNames[i]);
        throw new IOException("Did not detect '" +
          Bytes.toString(illegalNames[i]) + "' as an illegal user table name");
      } catch (IllegalArgumentException e) {
        // expected
      }
    }
    byte[] legalName = Bytes.toBytes("g-oo.d");
    try {
      new HTableDescriptor(legalName);
    } catch (IllegalArgumentException e) {
      throw new IOException("Legal user table name: '" +
        Bytes.toString(legalName) + "' caused IllegalArgumentException: " +
        e.getMessage());
    }
  }

  /**
   * For HADOOP-2579
   * @throws IOException
   */
  @Test (expected=TableExistsException.class)
  public void testTableNotFoundExceptionWithATable() throws IOException {
    final byte [] name = Bytes.toBytes("testTableNotFoundExceptionWithATable");
    TEST_UTIL.createTable(name, HConstants.CATALOG_FAMILY);
    TEST_UTIL.createTable(name, HConstants.CATALOG_FAMILY);
  }

  /**
   * For HADOOP-2579
   * @throws IOException
   */
  @Test (expected=TableNotFoundException.class)
  public void testTableNotFoundExceptionWithoutAnyTables() throws IOException {
    new HTable(TEST_UTIL.getConfiguration(),
      "testTableNotFoundExceptionWithoutAnyTables");
  }
}
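For context: the hunk above calls this.admin and TEST_UTIL without showing their setup, which sits in the unchanged lines of TestAdmin.java. A plausible reconstruction of that per-test setup, using the JUnit @Before hook and the HBaseAdmin(Configuration) constructor seen elsewhere in this commit; treat it as a hypothetical sketch, not the committed code:

  @Before
  public void setUp() throws Exception {
    // Hypothetical: recreate the admin client for each test against the
    // shared minicluster's configuration.
    this.admin = new HBaseAdmin(TEST_UTIL.getConfiguration());
  }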
@@ -1,140 +0,0 @@
/**
 * Copyright 2007 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.client;

import java.io.IOException;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HBaseClusterTestCase;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HServerAddress;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;

/**
 * Tests forced splitting of HTable
 */
public class TestForceSplit extends HBaseClusterTestCase {
  static final Log LOG = LogFactory.getLog(TestForceSplit.class);
  private static final byte[] tableName = Bytes.toBytes("test");
  private static final byte[] familyName = Bytes.toBytes("a");

  @Override
  protected void setUp() throws Exception {
    super.setUp();
    this.conf.setInt("hbase.io.index.interval", 32);
  }

  /**
   * Tests forcing split from client and having scanners successfully ride over split.
   * @throws Exception
   * @throws IOException
   */
  @SuppressWarnings("unused")
  public void testForceSplit() throws Exception {
    // create the test table
    HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.addFamily(new HColumnDescriptor(familyName));
    HBaseAdmin admin = new HBaseAdmin(conf);
    admin.createTable(htd);
    final HTable table = new HTable(conf, tableName);
    byte[] k = new byte[3];
    int rowCount = 0;
    for (byte b1 = 'a'; b1 < 'z'; b1++) {
      for (byte b2 = 'a'; b2 < 'z'; b2++) {
        for (byte b3 = 'a'; b3 < 'z'; b3++) {
          k[0] = b1;
          k[1] = b2;
          k[2] = b3;
          Put put = new Put(k);
          put.add(familyName, new byte[0], k);
          table.put(put);
          rowCount++;
        }
      }
    }

    // get the initial layout (should just be one region)
    Map<HRegionInfo, HServerAddress> m = table.getRegionsInfo();
    System.out.println("Initial regions (" + m.size() + "): " + m);
    assertTrue(m.size() == 1);

    // Verify row count
    Scan scan = new Scan();
    ResultScanner scanner = table.getScanner(scan);
    int rows = 0;
    for (Result result : scanner) {
      rows++;
    }
    scanner.close();
    assertEquals(rowCount, rows);

    // Have an outstanding scan going on to make sure we can scan over splits.
    scan = new Scan();
    scanner = table.getScanner(scan);
    // Scan first row so we are into first region before split happens.
    scanner.next();

    final AtomicInteger count = new AtomicInteger(0);
    Thread t = new Thread("CheckForSplit") {
      public void run() {
        for (int i = 0; i < 20; i++) {
          try {
            sleep(1000);
          } catch (InterruptedException e) {
            continue;
          }
          // check again    table = new HTable(conf, tableName);
          Map<HRegionInfo, HServerAddress> regions = null;
          try {
            regions = table.getRegionsInfo();
          } catch (IOException e) {
            e.printStackTrace();
          }
          if (regions == null) continue;
          count.set(regions.size());
          if (count.get() >= 2) break;
          LOG.debug("Cycle waiting on split");
        }
      }
    };
    t.start();
    // tell the master to split the table
    admin.split(Bytes.toString(tableName));
    t.join();

    // Verify row count
    rows = 1; // We counted one row above.
    for (Result result : scanner) {
      rows++;
      if (rows > rowCount) {
        scanner.close();
        assertTrue("Scanned more than expected (" + rowCount + ")", false);
      }
    }
    scanner.close();
    assertEquals(rowCount, rows);
  }
}
@@ -3405,16 +3405,6 @@ public class TestFromClientSide {
    }
  }

  /**
   * For HADOOP-2579
   * @throws IOException
   */
  @Test (expected=TableNotFoundException.class)
  public void testTableNotFoundExceptionWithoutAnyTables() throws IOException {
    new HTable(TEST_UTIL.getConfiguration(),
      "testTableNotFoundExceptionWithoutAnyTables");
  }

  @Test
  public void testGetClosestRowBefore() throws IOException {
    final byte [] tableAname = Bytes.toBytes("testGetClosestRowBefore");
@@ -3464,15 +3454,4 @@
    assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null));
    assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), one));
  }

  /**
   * For HADOOP-2579
   * @throws IOException
   */
  @Test (expected=TableExistsException.class)
  public void testTableNotFoundExceptionWithATable() throws IOException {
    final byte [] name = Bytes.toBytes("testTableNotFoundExceptionWithATable");
    TEST_UTIL.createTable(name, HConstants.CATALOG_FAMILY);
    TEST_UTIL.createTable(name, HConstants.CATALOG_FAMILY);
  }
}
@@ -30,7 +30,8 @@ import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.util.Bytes;

/**
 *
 * Test versions.
 * Does shutdown in middle of test to prove versions work across restart.
 */
public class TestGetRowVersions extends HBaseClusterTestCase {
  private static final Log LOG = LogFactory.getLog(TestGetRowVersions.class);
@@ -24,7 +24,7 @@ import org.apache.hadoop.hbase.HBaseTestCase;
import org.apache.hadoop.hbase.util.Bytes;

/**
 * Tests HTablePool
 * Tests HTablePool.
 */
public class TestHTablePool extends HBaseTestCase {

@@ -104,5 +104,4 @@ public class TestHTablePool extends HBaseTestCase {
    assertSame(table1, sameTable1);
    assertSame(table2, sameTable2);
  }

}
}