2008/02/04 HBase is now a subproject of Hadoop. The first HBase release as a subproject will be release 0.1.0, which will be equivalent to the version of HBase included in Hadoop 0.16.0. To accomplish this, the HBase portion of HBASE-288 (formerly HADOOP-1398) has been backed out. Once 0.1.0 is frozen (depending mostly on infrastructure changes due to becoming a subproject instead of a contrib project), this patch will reappear on HBase trunk.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@618518 13f79535-47bb-0310-9956-ffa450edef68
Jim Kellerman 2008-02-05 02:36:26 +00:00
parent 39bff3580b
commit 9a88155bf0
9 changed files with 236 additions and 417 deletions

View File

@@ -28,6 +28,15 @@ Trunk (unreleased changes)
Release 0.16.0
2008/02/04 HBase is now a subproject of Hadoop. The first HBase release as
a subproject will be release 0.1.0 which will be equivalent to
the version of HBase included in Hadoop 0.16.0. In order to
accomplish this, the HBase portion of HBASE-288 (formerly
HADOOP-1398) has been backed out. Once 0.1.0 is frozen (depending
mostly on changes to infrastructure due to becoming a sub project
instead of a contrib project), this patch will re-appear on HBase
trunk.
INCOMPATIBLE CHANGES
HADOOP-2056 A table with row keys containing colon fails to split regions
HADOOP-2079 Fix generated HLog, HRegion names

View File

@@ -118,7 +118,6 @@ TOKEN: /** for HQL statements */
| <BLOCK: "block">
| <RECORD: "record">
| <IN_MEMORY: "in_memory">
| <BLOCK_CACHE_ENABLED: "block_cache_enabled">
| <BLOOMFILTER: "bloomfilter">
| <COUNTING_BLOOMFILTER: "counting_bloomfilter">
| <RETOUCHED_BLOOMFILTER: "retouched_bloomfilter">
@@ -354,11 +353,6 @@ Map<String, Object> ColumnSpec() :
{
columnSpec.put("IN_MEMORY", true);
}
|
<BLOCK_CACHE_ENABLED>
{
columnSpec.put("BLOCK_CACHE_ENABLED", true);
}
|
<BLOOMFILTER>
<EQUALS>
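
The hunk above deletes the <BLOCK_CACHE_ENABLED> alternative from the HQL ColumnSpec() production, so block_cache_enabled is no longer a recognized column attribute. A minimal sketch of the semantic actions visible in the hunk (the class and method wrapper are illustrative; the real parser is generated by JavaCC from this grammar):

import java.util.HashMap;
import java.util.Map;

// Illustrative only: mirrors the ColumnSpec() semantic actions shown above.
public class ColumnSpecSketch {
    public static Map<String, Object> columnSpec() {
        Map<String, Object> columnSpec = new HashMap<String, Object>();
        // <IN_MEMORY> alternative, unchanged by this commit:
        columnSpec.put("IN_MEMORY", true);
        // <BLOCK_CACHE_ENABLED> alternative, removed by this commit:
        // columnSpec.put("BLOCK_CACHE_ENABLED", true);
        // <BLOOMFILTER> <EQUALS> ... continues with the bloom filter value.
        return columnSpec;
    }
}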

View File

@@ -75,7 +75,7 @@ public class HQLParser implements HQLParserConstants {
case SELECT:
case ENABLE:
case DISABLE:
case 69:
case 68:
switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
case HELP:
case ALTER:
@@ -100,7 +100,7 @@ public class HQLParser implements HQLParserConstants {
jj_la1[0] = jj_gen;
;
}
jj_consume_token(69);
jj_consume_token(68);
break;
case 0:
jj_consume_token(0);
@@ -390,7 +390,6 @@ public class HQLParser implements HQLParserConstants {
case MAX_LENGTH:
case COMPRESSION:
case IN_MEMORY:
case BLOCK_CACHE_ENABLED:
case BLOOMFILTER:
case VECTOR_SIZE:
case NUM_HASH:
@@ -441,10 +440,6 @@ public class HQLParser implements HQLParserConstants {
jj_consume_token(IN_MEMORY);
columnSpec.put("IN_MEMORY", true);
break;
case BLOCK_CACHE_ENABLED:
jj_consume_token(BLOCK_CACHE_ENABLED);
columnSpec.put("BLOCK_CACHE_ENABLED", true);
break;
case BLOOMFILTER:
jj_consume_token(BLOOMFILTER);
jj_consume_token(EQUALS);
@@ -1085,22 +1080,6 @@ public class HQLParser implements HQLParserConstants {
finally { jj_save(0, xla); }
}
final private boolean jj_3_1() {
if (jj_scan_token(ADD)) return true;
if (jj_3R_10()) return true;
return false;
}
final private boolean jj_3R_12() {
Token xsp;
xsp = jj_scanpos;
if (jj_scan_token(67)) {
jj_scanpos = xsp;
if (jj_scan_token(68)) return true;
}
return false;
}
final private boolean jj_3R_11() {
if (jj_scan_token(ID)) return true;
return false;
@@ -1116,6 +1095,22 @@ public class HQLParser implements HQLParserConstants {
return false;
}
final private boolean jj_3_1() {
if (jj_scan_token(ADD)) return true;
if (jj_3R_10()) return true;
return false;
}
final private boolean jj_3R_12() {
Token xsp;
xsp = jj_scanpos;
if (jj_scan_token(66)) {
jj_scanpos = xsp;
if (jj_scan_token(67)) return true;
}
return false;
}
public HQLParserTokenManager token_source;
SimpleCharStream jj_input_stream;
public Token token, jj_nt;
@@ -1138,10 +1133,10 @@ public class HQLParser implements HQLParserConstants {
jj_la1_0 = new int[] {0xf3ffe0,0xf3ffe1,0xf3ffe0,0x0,0x0,0x0,0x0,0x33dbc0,0x33dbc0,0x0,0x600,0x0,0x0,0x0,0x0,0x0,0x0,0x1000,0x0,0x80000000,0x0,0x2000000,0x0,0x3000000,0x8000000,0x3000000,0x80000000,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,};
}
private static void jj_la1_1() {
jj_la1_1 = new int[] {0x0,0x0,0x0,0x80000000,0x80000000,0x80000000,0x80000000,0x80000000,0x80000000,0x80000000,0x0,0xe71c000,0xe0000,0x1c00000,0xe71c000,0x10,0x10,0x30000000,0x0,0x0,0x0,0x0,0xc0002000,0x0,0x0,0x0,0x0,0x1,0x2,0x10,0x0,0x80002000,0x80002000,0x80002000,0x0,0x80002000,0x10,0x10,0x10,0x80000000,0x0,0x80000000,};
jj_la1_1 = new int[] {0x0,0x0,0x0,0x40000000,0xc0000000,0xc0000000,0x40000000,0x40000000,0x40000000,0x40000000,0x0,0x731c000,0xe0000,0xe00000,0x731c000,0x10,0x10,0x18000000,0x0,0x0,0x0,0x0,0xe0002000,0x0,0x0,0x0,0x0,0x1,0x2,0x10,0x0,0xc0002000,0xc0002000,0xc0002000,0x0,0xc0002000,0x10,0x10,0x10,0xc0000000,0x0,0x40000000,};
}
private static void jj_la1_2() {
jj_la1_2 = new int[] {0x0,0x20,0x0,0x0,0x3,0x3,0x18,0x0,0x0,0x18,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x18,0x0,0x18,0x0,0x19,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x18,0x1,0x19,0x1,0x18,0x19,0x0,0x0,0x0,0x1,0x18,0x18,};
jj_la1_2 = new int[] {0x0,0x10,0x0,0x0,0x1,0x1,0xc,0x0,0x0,0xc,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0xc,0x0,0xc,0x0,0xc,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0xc,0x0,0xc,0x0,0xc,0xc,0x0,0x0,0x0,0x0,0xc,0xc,};
}
final private JJCalls[] jj_2_rtns = new JJCalls[1];
private boolean jj_rescan = false;
@@ -1318,8 +1313,8 @@ public class HQLParser implements HQLParserConstants {
public ParseException generateParseException() {
jj_expentries.removeAllElements();
boolean[] la1tokens = new boolean[70];
for (int i = 0; i < 70; i++) {
boolean[] la1tokens = new boolean[69];
for (int i = 0; i < 69; i++) {
la1tokens[i] = false;
}
if (jj_kind >= 0) {
@@ -1341,7 +1336,7 @@ public class HQLParser implements HQLParserConstants {
}
}
}
for (int i = 0; i < 70; i++) {
for (int i = 0; i < 69; i++) {
if (la1tokens[i]) {
jj_expentry = new int[1];
jj_expentry[0] = i;
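
The jj_la1_* mask and token-count changes in this file follow mechanically from deleting one token: every token kind from 53 upward shifts down by one, so each affected bit in the lookahead masks moves down one position and the expected-token array shrinks from 70 to 69 entries. A hedged sketch of how generated JavaCC parsers index these masks (standard generated-parser layout, not code from this commit):

// Sketch of JavaCC lookahead-mask indexing (illustrative, not generated
// code): token kind k is tracked in bit (k % 32) of mask word (k / 32),
// so jj_la1_0 covers kinds 0-31, jj_la1_1 covers 32-63, and jj_la1_2
// covers 64 and up.
static boolean tokenExpected(int[][] la1Words, int state, int tokenKind) {
    int word = tokenKind / 32;        // which jj_la1_N array
    int bit = 1 << (tokenKind % 32);  // position inside that word
    return (la1Words[word][state] & bit) != 0;
}

STRING_LITERAL moving from kind 68 to 67 and QUOTED_IDENTIFIER from 67 to 66, for example, shifts their bits in jj_la1_2 down one position, which is why entries such as 0x18 become 0xc.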

View File

@@ -52,22 +52,21 @@ public interface HQLParserConstants {
int BLOCK = 50;
int RECORD = 51;
int IN_MEMORY = 52;
int BLOCK_CACHE_ENABLED = 53;
int BLOOMFILTER = 54;
int COUNTING_BLOOMFILTER = 55;
int RETOUCHED_BLOOMFILTER = 56;
int VECTOR_SIZE = 57;
int NUM_HASH = 58;
int NUM_ENTRIES = 59;
int ADD = 60;
int CHANGE = 61;
int COUNT = 62;
int ID = 63;
int INTEGER_LITERAL = 64;
int FLOATING_POINT_LITERAL = 65;
int EXPONENT = 66;
int QUOTED_IDENTIFIER = 67;
int STRING_LITERAL = 68;
int BLOOMFILTER = 53;
int COUNTING_BLOOMFILTER = 54;
int RETOUCHED_BLOOMFILTER = 55;
int VECTOR_SIZE = 56;
int NUM_HASH = 57;
int NUM_ENTRIES = 58;
int ADD = 59;
int CHANGE = 60;
int COUNT = 61;
int ID = 62;
int INTEGER_LITERAL = 63;
int FLOATING_POINT_LITERAL = 64;
int EXPONENT = 65;
int QUOTED_IDENTIFIER = 66;
int STRING_LITERAL = 67;
int DEFAULT = 0;
@@ -125,7 +124,6 @@ public interface HQLParserConstants {
"\"block\"",
"\"record\"",
"\"in_memory\"",
"\"block_cache_enabled\"",
"\"bloomfilter\"",
"\"counting_bloomfilter\"",
"\"retouched_bloomfilter\"",

View File

@@ -64,8 +64,7 @@ struct ColumnDescriptor {
5:i32 maxValueLength = 2147483647,
6:string bloomFilterType = "NONE",
7:i32 bloomFilterVectorSize = 0,
8:i32 bloomFilterNbHashes = 0,
9:bool blockCacheEnabled = 0
8:i32 bloomFilterNbHashes = 0
}
/**

View File

@@ -46,7 +46,6 @@ public class ColumnDescriptor implements TBase, java.io.Serializable {
public String bloomFilterType;
public int bloomFilterVectorSize;
public int bloomFilterNbHashes;
public boolean blockCacheEnabled;
public final Isset __isset = new Isset();
public static final class Isset {
@@ -58,7 +57,6 @@ public class ColumnDescriptor implements TBase, java.io.Serializable {
public boolean bloomFilterType = false;
public boolean bloomFilterVectorSize = false;
public boolean bloomFilterNbHashes = false;
public boolean blockCacheEnabled = false;
}
public ColumnDescriptor() {
@@ -76,8 +74,6 @@ public class ColumnDescriptor implements TBase, java.io.Serializable {
this.bloomFilterNbHashes = 0;
this.blockCacheEnabled = false;
}
public ColumnDescriptor(
@@ -88,8 +84,7 @@ public class ColumnDescriptor implements TBase, java.io.Serializable {
int maxValueLength,
String bloomFilterType,
int bloomFilterVectorSize,
int bloomFilterNbHashes,
boolean blockCacheEnabled)
int bloomFilterNbHashes)
{
this();
this.name = name;
@@ -108,8 +103,6 @@ public class ColumnDescriptor implements TBase, java.io.Serializable {
this.__isset.bloomFilterVectorSize = true;
this.bloomFilterNbHashes = bloomFilterNbHashes;
this.__isset.bloomFilterNbHashes = true;
this.blockCacheEnabled = blockCacheEnabled;
this.__isset.blockCacheEnabled = true;
}
public void read(TProtocol iprot) throws TException {
@@ -187,14 +180,6 @@ public class ColumnDescriptor implements TBase, java.io.Serializable {
TProtocolUtil.skip(iprot, field.type);
}
break;
case 9:
if (field.type == TType.BOOL) {
this.blockCacheEnabled = iprot.readBool();
this.__isset.blockCacheEnabled = true;
} else {
TProtocolUtil.skip(iprot, field.type);
}
break;
default:
TProtocolUtil.skip(iprot, field.type);
break;
@@ -262,12 +247,6 @@ public class ColumnDescriptor implements TBase, java.io.Serializable {
oprot.writeFieldBegin(field);
oprot.writeI32(this.bloomFilterNbHashes);
oprot.writeFieldEnd();
field.name = "blockCacheEnabled";
field.type = TType.BOOL;
field.id = 9;
oprot.writeFieldBegin(field);
oprot.writeBool(this.blockCacheEnabled);
oprot.writeFieldEnd();
oprot.writeFieldStop();
oprot.writeStructEnd();
}
@@ -290,8 +269,6 @@ public class ColumnDescriptor implements TBase, java.io.Serializable {
sb.append(this.bloomFilterVectorSize);
sb.append(",bloomFilterNbHashes:");
sb.append(this.bloomFilterNbHashes);
sb.append(",blockCacheEnabled:");
sb.append(this.blockCacheEnabled);
sb.append(")");
return sb.toString();
}
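
Removing blockCacheEnabled (field id 9) from the generated Thrift class stays forward compatible with old serialized data, because the read() loop above routes any unrecognized field id to the default skip branch. A minimal sketch of that loop's shape, assuming the com.facebook.thrift packages bundled with Hadoop/HBase at the time (the wrapper class and elided cases are illustrative):

import com.facebook.thrift.TException;
import com.facebook.thrift.protocol.TField;
import com.facebook.thrift.protocol.TProtocol;
import com.facebook.thrift.protocol.TProtocolUtil;
import com.facebook.thrift.protocol.TType;

// Illustrative sketch of the generated read() loop after this commit.
// Bytes written by the old 9-field struct still deserialize: field 9
// (blockCacheEnabled) matches no case and is skipped on the wire.
public class ColumnDescriptorReadSketch {
    public void read(TProtocol iprot) throws TException {
        iprot.readStructBegin();
        while (true) {
            TField field = iprot.readFieldBegin();
            if (field.type == TType.STOP) {
                break;
            }
            switch (field.id) {
                // ... cases 1 through 8 populate the surviving fields ...
                default:
                    TProtocolUtil.skip(iprot, field.type); // old field 9 lands here
                    break;
            }
            iprot.readFieldEnd();
        }
        iprot.readStructEnd();
    }
}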

View File

@@ -57,7 +57,7 @@ public class TestToString extends TestCase {
assertEquals("HRegionInfo",
"regionname: -ROOT-,,0, startKey: <>, endKey: <>, encodedName: 70236052, tableDesc: " +
"{name: -ROOT-, families: {info:={name: info, max versions: 1, " +
"compression: NONE, in memory: false, block cache enabled: false, " +
"max length: 2147483647, bloom filter: none}}}", hri.toString());
"compression: NONE, in memory: false, max length: 2147483647, bloom " +
"filter: none}}}", hri.toString());
}
}

View File

@@ -1,121 +0,0 @@
/**
* Copyright 2008 The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.io;
import java.io.IOException;
import junit.framework.TestCase;
import org.apache.hadoop.fs.FSInputStream;
import org.apache.hadoop.io.DataInputBuffer;
public class TestBlockFSInputStream extends TestCase {
static class InMemoryFSInputStream extends FSInputStream {
private byte[] data;
private DataInputBuffer din = new DataInputBuffer();
public InMemoryFSInputStream(byte[] data) {
this.data = data;
din.reset(data, data.length);
}
@Override
public long getPos() throws IOException {
return din.getPosition();
}
@Override
public void seek(long pos) throws IOException {
if (pos > data.length) {
throw new IOException("Cannot seek after EOF");
}
din.reset(data, (int) pos, data.length - (int) pos);
}
@Override
public boolean seekToNewSource(long targetPos) throws IOException {
return false;
}
@Override
public int read() throws IOException {
return din.read();
}
}
private byte[] data;
private BlockFSInputStream stream;
@Override
protected void setUp() throws Exception {
data = new byte[34];
for (int i = 0; i < data.length; i++) {
data[i] = (byte) i;
}
FSInputStream byteStream = new InMemoryFSInputStream(data);
stream = new BlockFSInputStream(byteStream, 34, 10);
}
public void testReadForwards() throws IOException {
for (int i = 0; i < data.length; i++) {
assertEquals(i, stream.getPos());
assertEquals(i, stream.read());
}
}
public void testReadBackwards() throws IOException {
for (int i = data.length - 1; i >= 0; i--) {
stream.seek(i);
assertEquals(i, stream.getPos());
assertEquals(i, stream.read());
}
}
public void testReadInChunks() throws IOException {
byte[] buf = new byte[data.length];
int chunkLength = 6;
assertEquals(6, stream.read(buf, 0, chunkLength));
assertEquals(4, stream.read(buf, 6, chunkLength));
assertEquals(6, stream.read(buf, 10, chunkLength));
assertEquals(4, stream.read(buf, 16, chunkLength));
assertEquals(6, stream.read(buf, 20, chunkLength));
assertEquals(4, stream.read(buf, 26, chunkLength));
assertEquals(4, stream.read(buf, 30, chunkLength));
assertEquals(0, stream.available());
assertEquals(-1, stream.read());
for (int i = 0; i < buf.length; i++) {
assertEquals(i, buf[i]);
}
}
}
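
The deleted test pins down one behavior of the removed BlockFSInputStream worth recording: a read never crosses a block boundary, which is why testReadInChunks() gets the alternating 6, 4, 6, 4, ... return values for 6-byte requests against 10-byte blocks (the final 4 is an end-of-file cap, since the stream holds only 34 bytes). A sketch of that arithmetic; the helper name is ours, not from the deleted class:

// Bytes a block-bounded read can return from position pos: the read stops
// at the next multiple of blockSize, and at end-of-file. With blockSize = 10,
// fileLength = 34, and repeated 6-byte requests: 6, 4, 6, 4, 6, 4, then 4.
static int maxBlockBoundedRead(long pos, int blockSize, long fileLength,
                               int requested) {
    int toBoundary = blockSize - (int) (pos % blockSize);  // stay inside block
    long toEof = fileLength - pos;                         // stay inside file
    return (int) Math.min(requested, Math.min(toBoundary, toEof));
}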