HBASE-3006 Reading compressed HFile blocks causes way too many DFS RPC calls severely impacting performance--Now add fix I intended, a spelling mistake in HFile
git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@997975 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
parent
96f8cbe9e2
commit
e532293310
|
@ -523,6 +523,9 @@ Release 0.21.0 - Unreleased
|
||||||
HBASE-2986 multi writable can npe causing client hang
|
HBASE-2986 multi writable can npe causing client hang
|
||||||
HBASE-2979 Fix failing TestMultParrallel in hudson build
|
HBASE-2979 Fix failing TestMultParrallel in hudson build
|
||||||
HBASE-2899 hfile.min.blocksize.size ignored/documentation wrong
|
HBASE-2899 hfile.min.blocksize.size ignored/documentation wrong
|
||||||
|
HBASE-3006 Reading compressed HFile blocks causes way too many DFS RPC
|
||||||
|
calls severely impacting performance
|
||||||
|
(Kannan Muthukkaruppan via Stack)
|
||||||
|
|
||||||
IMPROVEMENTS
|
IMPROVEMENTS
|
||||||
HBASE-1760 Cleanup TODOs in HTable
|
HBASE-1760 Cleanup TODOs in HTable
|
||||||
|
|
|
@ -19,6 +19,7 @@
|
||||||
*/
|
*/
|
||||||
package org.apache.hadoop.hbase.io.hfile;
|
package org.apache.hadoop.hbase.io.hfile;
|
||||||
|
|
||||||
|
import java.io.BufferedInputStream;
|
||||||
import java.io.Closeable;
|
import java.io.Closeable;
|
||||||
import java.io.DataInputStream;
|
import java.io.DataInputStream;
|
||||||
import java.io.DataOutputStream;
|
import java.io.DataOutputStream;
|
||||||
|
@ -1051,9 +1052,14 @@ public class HFile {
|
||||||
// decompressor reading into next block -- IIRC, it just grabs a
|
// decompressor reading into next block -- IIRC, it just grabs a
|
||||||
// bunch of data w/o regard to whether decompressor is coming to end of a
|
// bunch of data w/o regard to whether decompressor is coming to end of a
|
||||||
// decompression.
|
// decompression.
|
||||||
|
|
||||||
|
// We use a buffer of DEFAULT_BLOCKSIZE size. This might be extreme.
|
||||||
|
// Could maybe do with less. Study and figure it: TODO
|
||||||
InputStream is = this.compressAlgo.createDecompressionStream(
|
InputStream is = this.compressAlgo.createDecompressionStream(
|
||||||
|
new BufferedInputStream(
|
||||||
new BoundedRangeFileInputStream(this.istream, offset, compressedSize,
|
new BoundedRangeFileInputStream(this.istream, offset, compressedSize,
|
||||||
pread),
|
pread),
|
||||||
|
Math.min(DEFAULT_BLOCKSIZE, compressedSize)),
|
||||||
decompressor, 0);
|
decompressor, 0);
|
||||||
buf = ByteBuffer.allocate(decompressedSize);
|
buf = ByteBuffer.allocate(decompressedSize);
|
||||||
IOUtils.readFully(is, buf.array(), 0, buf.capacity());
|
IOUtils.readFully(is, buf.array(), 0, buf.capacity());
|
||||||
|
|
Loading…
Reference in New Issue