HBASE-3006 Reading compressed HFile blocks causes way too many DFS RPC calls severely impacting performance

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@997968 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Michael Stack 2010-09-17 03:56:16 +00:00
parent eab3e07e39
commit 763865630c
2 changed files with 12 additions and 3 deletions

View File

@ -523,6 +523,9 @@ Release 0.21.0 - Unreleased
HBASE-2986 multi writable can npe causing client hang
HBASE-2979 Fix failing TestMultParrallel in hudson build
HBASE-2899 hfile.min.blocksize.size ignored/documentation wrong
HBASE-3006 Reading compressed HFile blocks causes way too many DFS RPC
calls severely impacting performance
(Kannan Muthukkaruppan via Stack)
IMPROVEMENTS
HBASE-1760 Cleanup TODOs in HTable

View File

@ -19,6 +19,7 @@
*/
package org.apache.hadoop.hbase.io.hfile;
import java.io.BufferedInputStream;
import java.io.Closeable;
import java.io.DataInputStream;
import java.io.DataOutputStream;
@ -1051,9 +1052,14 @@ public class HFile {
// decompressor reading into next block -- IIRC, it just grabs a
// bunch of data w/o regard to whether decompressor is coming to end of a
// decompression.
// We use a buffer of DEFAULT_BLOCKSIZE size. This might be extreme.
// Could maybe do with less. Study and figure it: TODO
InputStream is = this.compressAlgo.createDecompressionStream(
new BufferedInputStream(
new BoundedRangeFileInputStream(this.istream, offset, compressedSize,
pread),
Math.min(DEFAUT_BLOCKSIZE, compressedSize)),
decompressor, 0);
buf = ByteBuffer.allocate(decompressedSize);
IOUtils.readFully(is, buf.array(), 0, buf.capacity());