HBASE-2252 Mapping a very big table kills region servers

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@930945 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Jean-Daniel Cryans 2010-04-05 20:04:36 +00:00
parent 53f0d0db3e
commit b1c1873abe
3 changed files with 5 additions and 4 deletions

View File

@@ -482,6 +482,7 @@ Release 0.21.0 - Unreleased
HBASE-2402 [stargate] set maxVersions on gets
HBASE-2087 The wait on compaction because "Too many store files"
holds up all flushing
HBASE-2252 Mapping a very big table kills region servers
NEW FEATURES
HBASE-1961 HBase EC2 scripts

View File

@@ -62,6 +62,7 @@ public class TableRecordReaderImpl {
Scan scan = new Scan(firstRow, endRow);
scan.addColumns(trrInputColumns);
scan.setFilter(trrRowFilter);
scan.setCacheBlocks(false);
this.scanner = this.htable.getScanner(scan);
} else {
LOG.debug("TIFB.restart, firstRow: " +

View File

@@ -128,13 +128,12 @@ implements Configurable {
scan.setMaxVersions(Integer.parseInt(conf.get(SCAN_MAXVERSIONS)));
}
if (conf.get(SCAN_CACHEBLOCKS) != null) {
scan.setCacheBlocks(Boolean.parseBoolean(conf.get(SCAN_CACHEBLOCKS)));
}
if (conf.get(SCAN_CACHEDROWS) != null) {
scan.setCaching(Integer.parseInt(conf.get(SCAN_CACHEDROWS)));
}
// false by default, full table scans generate too much BC churn
scan.setCacheBlocks((conf.getBoolean(SCAN_CACHEBLOCKS, false)));
} catch (Exception e) {
LOG.error(StringUtils.stringifyException(e));
}