HBASE-2252 Mapping a very big table kills region servers
git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@930945 13f79535-47bb-0310-9956-ffa450edef68
commit b1c1873abe
parent 53f0d0db3e
@@ -482,6 +482,7 @@ Release 0.21.0 - Unreleased
    HBASE-2402  [stargate] set maxVersions on gets
    HBASE-2087  The wait on compaction because "Too many store files"
                holds up all flushing
+   HBASE-2252  Mapping a very big table kills region servers
 
   NEW FEATURES
    HBASE-1961  HBase EC2 scripts

@@ -62,6 +62,7 @@ public class TableRecordReaderImpl {
         Scan scan = new Scan(firstRow, endRow);
         scan.addColumns(trrInputColumns);
         scan.setFilter(trrRowFilter);
+        scan.setCacheBlocks(false);
         this.scanner = this.htable.getScanner(scan);
       } else {
         LOG.debug("TIFB.restart, firstRow: " +

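For context, the hunk above adds scan.setCacheBlocks(false) to the scan built in TableRecordReaderImpl.restart(). Below is a minimal standalone sketch, not part of this commit, of the same scan settings issued from a plain client; the table name, column family, and caching value are illustrative assumptions. Disabling block caching only changes what the region server keeps in its block cache for this scan, not the data returned.

// Hypothetical sketch of a full scan that bypasses the region server block cache,
// mirroring what the record reader now does. "bigtable", "cf" and the caching
// value are illustrative, not taken from this commit.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;

public class FullScanNoBlockCache {
  public static void main(String[] args) throws Exception {
    HTable table = new HTable(new HBaseConfiguration(), "bigtable");
    Scan scan = new Scan();
    scan.addFamily(Bytes.toBytes("cf"));
    scan.setCaching(500);          // rows fetched per RPC; illustrative value
    scan.setCacheBlocks(false);    // the HBASE-2252 change: do not churn the block cache
    ResultScanner scanner = table.getScanner(scan);
    try {
      for (Result result : scanner) {
        // process each row here
      }
    } finally {
      scanner.close();
    }
  }
}
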
@@ -128,13 +128,12 @@ implements Configurable {
        scan.setMaxVersions(Integer.parseInt(conf.get(SCAN_MAXVERSIONS)));
      }
 
-     if (conf.get(SCAN_CACHEBLOCKS) != null) {
-       scan.setCacheBlocks(Boolean.parseBoolean(conf.get(SCAN_CACHEBLOCKS)));
-     }
-
      if (conf.get(SCAN_CACHEDROWS) != null) {
        scan.setCaching(Integer.parseInt(conf.get(SCAN_CACHEDROWS)));
      }
+
+     // false by default, full table scans generate too much BC churn
+     scan.setCacheBlocks((conf.getBoolean(SCAN_CACHEBLOCKS, false)));
    } catch (Exception e) {
      LOG.error(StringUtils.stringifyException(e));
    }

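The hunk above makes block caching default to false for TableInputFormat scans, read via conf.getBoolean(SCAN_CACHEBLOCKS, false) instead of the old null check. A minimal job-setup sketch follows, assuming SCAN_CACHEBLOCKS and INPUT_TABLE are the public configuration keys on TableInputFormat; the table name is illustrative. A job scanning a small, hot table could still opt back in this way.

// Hypothetical sketch: opting back into block caching for a TableInputFormat scan.
// After this commit the default is false; set it to true only when the scanned
// blocks are worth keeping in the region server cache.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableInputFormat;

public class CacheBlocksOptIn {
  public static void main(String[] args) {
    Configuration conf = new HBaseConfiguration();
    conf.set(TableInputFormat.INPUT_TABLE, "small_hot_table");   // illustrative table name
    conf.setBoolean(TableInputFormat.SCAN_CACHEBLOCKS, true);    // overrides the new false default
    // ... pass conf to the Job that uses TableInputFormat ...
  }
}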