HADOOP-2320 Committed TestGet2 is mangled (breaks build).

git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/trunk/src/contrib/hbase@599951 13f79535-47bb-0310-9956-ffa450edef68
Michael Stack 2007-11-30 20:40:31 +00:00
parent 6b159e940e
commit 554146cc64
2 changed files with 21 additions and 169 deletions


@@ -7,7 +7,7 @@ Trunk (unreleased changes)
NEW FEATURES
HADOOP-2061 Add new Base64 dialects
HADOOP-2084 Add a LocalHBaseCluster
-HADOOP-2068 RESTful interface
+HADOOP-2068 RESTful interface (Bryan Duxbury via Stack)
OPTIMIZATIONS
@@ -45,6 +45,7 @@ Trunk (unreleased changes)
HADOOP-2308 null regioninfo breaks meta scanner
HADOOP-2304 Abbreviated symbol parsing error of dir path in jar command
(Edward Yoon via Stack)
+HADOOP-2320 Committed TestGet2 is mangled (breaks build).
IMPROVEMENTS
HADOOP-2401 Add convenience put method that takes writable


@@ -1,171 +1,22 @@
package org.apache.hadoop.hbase;
import java.io.IOException;
import java.util.Map;
import java.util.TreeMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.dfs.MiniDFSCluster;
import org.apache.hadoop.hbase.filter.StopRowFilter;
import org.apache.hadoop.hbase.filter.WhileMatchRowFilter;
import org.apache.hadoop.io.Text;
/**
* {@link TestGet} is a medley of tests of get all done up as a single test.
* This class breaks those tests out as individual test cases.
*/
public class TestGet2 extends HBaseTestCase {
private final Log LOG = LogFactory.getLog(this.getClass().getName());
private MiniDFSCluster miniHdfs;
protected void setUp() throws Exception {
super.setUp();
this.miniHdfs = new MiniDFSCluster(this.conf, 1, true, null);
}
/**
* Tests for HADOOP-2161: verify getFull returns all columns, both before
* and after the cache is flushed to store files.
* @throws Exception
*/
public void testGetFull() throws Exception {
HRegion region = null;
HScannerInterface scanner = null;
HLog hlog = new HLog(this.miniHdfs.getFileSystem(), this.testDir,
this.conf, null);
try {
HTableDescriptor htd = createTableDescriptor(getName());
HRegionInfo hri = new HRegionInfo(htd, null, null);
region = new HRegion(this.testDir, hlog, this.miniHdfs.getFileSystem(),
this.conf, hri, null, null);
for (int i = 0; i < COLUMNS.length; i++) {
addContent(region, COLUMNS[i].toString());
}
// Find two rows to use doing getFull.
final Text arbitraryStartRow = new Text("b");
Text actualStartRow = null;
final Text arbitraryStopRow = new Text("c");
Text actualStopRow = null;
Text [] columns = new Text [] {new Text(COLFAMILY_NAME1)};
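// Scan the first column family from the arbitrary start row until the
// stop-row filter fires, recording the first and last rows returned.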
scanner = region.getScanner(columns,
arbitraryStartRow, HConstants.LATEST_TIMESTAMP,
new WhileMatchRowFilter(new StopRowFilter(arbitraryStopRow)));
HStoreKey key = new HStoreKey();
TreeMap<Text, byte[]> value = new TreeMap<Text, byte []>();
while (scanner.next(key, value)) {
if (actualStartRow == null) {
actualStartRow = new Text(key.getRow());
} else {
actualStopRow = key.getRow();
}
}
// Assert I got all out.
assertColumnsPresent(region, actualStartRow);
assertColumnsPresent(region, actualStopRow);
// Force a flush so store files come into play.
region.flushcache();
// Assert I got all out.
assertColumnsPresent(region, actualStartRow);
assertColumnsPresent(region, actualStopRow);
} finally {
if (scanner != null) {
scanner.close();
}
if (region != null) {
try {
region.close();
} catch (Exception e) {
e.printStackTrace();
}
}
hlog.closeAndDelete();
}
}
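/**
* Verifies that gets at an explicit timestamp return the version of a cell
* current at that time, both before and after a cache flush.
* @throws IOException
*/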
public void testGetAtTimestamp() throws IOException {
HRegion region = null;
HRegionIncommon region_incommon = null;
HLog hlog = new HLog(this.miniHdfs.getFileSystem(), this.testDir,
this.conf, null);
try {
HTableDescriptor htd = createTableDescriptor(getName());
HRegionInfo hri = new HRegionInfo(htd, null, null);
region = new HRegion(this.testDir, hlog, this.miniHdfs.getFileSystem(),
this.conf, hri, null, null);
region_incommon = new HRegionIncommon(region);
long right_now = System.currentTimeMillis();
long one_second_ago = right_now - 1000;
Text t = new Text("test_row");
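// Write two versions of the same cell, committed one second apart, so a
// get at either timestamp can be checked against the expected version.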
long lockid = region_incommon.startBatchUpdate(t);
region_incommon.put(lockid, COLUMNS[0], "old text".getBytes());
region_incommon.commit(lockid, one_second_ago);
lockid = region_incommon.startBatchUpdate(t);
region_incommon.put(lockid, COLUMNS[0], "new text".getBytes());
region_incommon.commit(lockid, right_now);
assertCellValueEquals(region, t, COLUMNS[0], right_now, "new text");
assertCellValueEquals(region, t, COLUMNS[0], one_second_ago, "old text");
// Force a flush so store files come into play.
region_incommon.flushcache();
assertCellValueEquals(region, t, COLUMNS[0], right_now, "new text");
assertCellValueEquals(region, t, COLUMNS[0], one_second_ago, "old text");
} finally {
if (region != null) {
try {
region.close();
} catch (Exception e) {
e.printStackTrace();
}
}
hlog.closeAndDelete();
}
}
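/*
* Fetches the full row at the given timestamp and asserts that the named
* column holds the expected value.
*/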
private void assertCellValueEquals(final HRegion region, final Text row,
final Text column, final long timestamp, final String value)
throws IOException {
Map<Text, byte[]> result = region.getFull(row, timestamp);
assertEquals("cell value at a given timestamp", new String(result.get(column)), value);
}
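/*
* Asserts that the given row contains exactly the expected columns and that
* every cell value equals the row key, as written by addContent.
*/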
private void assertColumnsPresent(final HRegion r, final Text row)
throws IOException {
Map<Text, byte[]> result = r.getFull(row);
int columnCount = 0;
for (Map.Entry<Text, byte[]> e: result.entrySet()) {
columnCount++;
String column = e.getKey().toString();
boolean legitColumn = false;
for (int i = 0; i < COLUMNS.length; i++) {
// Assert the value is the same as the row key; that is the nature of
// the data written by addContent.
assertTrue(row.equals(new Text(e.getValue())));
if (COLUMNS[i].equals(new Text(column))) {
legitColumn = true;
break;
}
}
assertTrue("is legit column name", legitColumn);
}
assertEquals("count of columns", columnCount, COLUMNS.length);
}
protected void tearDown() throws Exception {
if (this.miniHdfs != null) {
this.miniHdfs.shutdown();
}
super.tearDown();
}
}
/**
* Copyright 2007 The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase;
import java.io.IOException;