HBASE-1995 Add configurable max value size check
git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@885162 13f79535-47bb-0310-9956-ffa450edef68
parent 15e04fbc36
commit c09a401202

@@ -209,6 +209,8 @@ Release 0.21.0 - Unreleased
   HBASE-2012 [EC2] LZO support
   HBASE-2011 Add zktop like output to HBase's master UI (Lars George via
              Andrew Purtell)
   HBASE-1995 Add configurable max value size check (Lars George via Andrew
              Purtell)

NEW FEATURES
   HBASE-1901 "General" partitioner for "hbase-48" bulk (behind the api, write

@@ -136,6 +136,17 @@
    calls of next may take longer and longer times when the cache is empty.
    </description>
  </property>
  <property>
    <name>hbase.client.keyvalue.maxsize</name>
    <value>-1</value>
    <description>Specifies the combined maximum allowed size of a KeyValue
    instance. This sets an upper boundary for a single entry saved in a
    storage file. Since KeyValues cannot be split, the check helps avoid a
    region that can no longer be split because a single entry is too large.
    It seems wise to set this to a fraction of the maximum region size.
    Setting it to zero or less disables the check.
    </description>
  </property>
  <property>
    <name>hbase.regionserver.lease.period</name>
    <value>60000</value>
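
The same limit can also be set programmatically rather than in hbase-default.xml. A minimal sketch, assuming the client API introduced by this patch; the table name and the 10 MB figure are arbitrary examples:

  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.client.HTable;

  // Cap client-side KeyValue size at 10 MB for this client only.
  HBaseConfiguration conf = new HBaseConfiguration();
  conf.setInt("hbase.client.keyvalue.maxsize", 10 * 1024 * 1024);
  // HTable reads the limit once, in its constructor, so set it before this call.
  HTable table = new HTable(conf, "mytable");
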
@@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.HServerAddress;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.NotServingRegionException;
import org.apache.hadoop.hbase.UnknownScannerException;
import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor;
@@ -62,6 +63,7 @@ public class HTable implements HTableInterface {
  private boolean autoFlush;
  private long currentWriteBufferSize;
  protected int scannerCaching;
  private int maxKeyValueSize;

  /**
   * Creates an object to access a HBase table
@@ -121,6 +123,7 @@ public class HTable implements HTableInterface {
    this.autoFlush = true;
    this.currentWriteBufferSize = 0;
    this.scannerCaching = conf.getInt("hbase.client.scanner.caching", 1);
    this.maxKeyValueSize = conf.getInt("hbase.client.keyvalue.maxsize", -1);
  }

  /**
@@ -602,9 +605,18 @@ public class HTable implements HTableInterface {
   * @throws IllegalArgumentException
   */
  private void validatePut(final Put put) throws IllegalArgumentException {
    if (put.isEmpty()) {
      throw new IllegalArgumentException("No columns to insert");
    }
    if (maxKeyValueSize > 0) {
      for (List<KeyValue> list : put.getFamilyMap().values()) {
        for (KeyValue kv : list) {
          if (kv.getLength() > maxKeyValueSize) {
            throw new IllegalArgumentException("KeyValue size too large");
          }
        }
      }
    }
  }

  /**
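
With the check above in place, an oversized Put now fails fast on the client, before any RPC is issued. A hedged sketch of the new failure mode; `conf`, `table`, `ROW`, `FAMILY`, and `QUALIFIER` are assumed placeholders, and the 1 MB limit is an arbitrary example:

  // Assumes conf carries hbase.client.keyvalue.maxsize = 1 MB and that
  // table was constructed from that conf.
  Put put = new Put(ROW);
  put.add(FAMILY, QUALIFIER, new byte[2 * 1024 * 1024]); // 2 MB value, over the cap
  try {
    table.put(put);
  } catch (IllegalArgumentException expected) {
    // "KeyValue size too large" -- the region server never sees the entry
  }
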
@@ -32,6 +32,7 @@ import java.util.UUID;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
@@ -39,8 +40,6 @@ import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HServerAddress;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.TableExistsException;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.filter.CompareFilter;
import org.apache.hadoop.hbase.filter.Filter;
@@ -400,6 +399,26 @@ public class TestFromClientSide {
    System.out.println("Done.");
  }

  @Test
  public void testMaxKeyValueSize() throws Exception {
    byte [] TABLE = Bytes.toBytes("testMaxKeyValueSize");
    HBaseConfiguration conf = TEST_UTIL.getConfiguration();
    String oldMaxSize = conf.get("hbase.client.keyvalue.maxsize");
    HTable ht = TEST_UTIL.createTable(TABLE, FAMILY);
    byte[] value = new byte[4 * 1024 * 1024];
    Put put = new Put(ROW);
    put.add(FAMILY, QUALIFIER, value);
    // no limit is configured yet, so a 4 MB value is accepted
    ht.put(put);
    try {
      conf.setInt("hbase.client.keyvalue.maxsize", 2 * 1024 * 1024);
      // HTable reads the limit in its constructor, so a fresh instance is
      // needed for the lowered limit to take effect
      ht = new HTable(conf, TABLE);
      put = new Put(ROW);
      put.add(FAMILY, QUALIFIER, value); // 4 MB value, now over the 2 MB cap
      ht.put(put);
      fail("Inserting a too large KeyValue worked, should throw exception");
    } catch (IllegalArgumentException e) {
      // expected: validatePut rejects the oversized KeyValue client-side
    }
    conf.set("hbase.client.keyvalue.maxsize", oldMaxSize);
  }

  @Test
  public void testFilters() throws Exception {
    byte [] TABLE = Bytes.toBytes("testFilters");