HBASE-6164 Correct the bug in block encoding usage in bulkload
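
Before this fix, the DATA_BLOCK_ENCODING key was written into the file info only by StoreFile.Writer#close(). HFiles created directly through an HFile.Writer, as the bulk load path does, never recorded their encoding, so createFromFileInfo() fell back to NoOpDataBlockEncoder when reading them. Move the key to the HFileDataBlockEncoder interface and have HFileWriterV1/V2 call saveMetadata(HFile.Writer) on close, so every HFile records the encoding of its data blocks.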

Submitted by:	Anoop
Reviewed by:	Ted


git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1352220 13f79535-47bb-0310-9956-ffa450edef68
ramkrishna 2012-06-20 17:33:19 +00:00
parent 4cb2174c29
commit 621ec576eb
7 changed files with 16 additions and 22 deletions

org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoder.java

@@ -24,7 +24,7 @@ import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext;
 import org.apache.hadoop.hbase.io.encoding.HFileBlockDecodingContext;
 import org.apache.hadoop.hbase.io.hfile.Compression.Algorithm;
-import org.apache.hadoop.hbase.regionserver.StoreFile;
+import org.apache.hadoop.hbase.util.Bytes;

 /**
  * Controls what kind of data block encoding is used. If data block encoding is
@@ -33,6 +33,8 @@ import org.apache.hadoop.hbase.regionserver.StoreFile;
  */
 @InterfaceAudience.Private
 public interface HFileDataBlockEncoder {
+  /** Type of encoding used for data blocks in HFile. Stored in file info. */
+  byte[] DATA_BLOCK_ENCODING = Bytes.toBytes("DATA_BLOCK_ENCODING");

   /**
    * Converts a block from the on-disk format to the in-cache format. Called in
@@ -71,11 +73,11 @@ public boolean useEncodedScanner(boolean isCompaction);
   public boolean useEncodedScanner(boolean isCompaction);

   /**
-   * Save metadata in StoreFile which will be written to disk
-   * @param storeFileWriter writer for a given StoreFile
+   * Save metadata in HFile which will be written to disk
+   * @param writer writer for a given HFile
    * @exception IOException on disk problems
    */
-  public void saveMetadata(StoreFile.Writer storeFileWriter)
+  public void saveMetadata(HFile.Writer writer)
       throws IOException;

   /** @return the on-disk data block encoding */

org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoderImpl.java

@@ -28,7 +28,6 @@ import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext;
 import org.apache.hadoop.hbase.io.encoding.HFileBlockDecodingContext;
 import org.apache.hadoop.hbase.io.hfile.Compression.Algorithm;
 import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo;
-import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.util.Bytes;

 import com.google.common.base.Preconditions;
@@ -96,8 +95,7 @@ public class HFileDataBlockEncoderImpl implements HFileDataBlockEncoder {
   public static HFileDataBlockEncoder createFromFileInfo(
       FileInfo fileInfo, DataBlockEncoding preferredEncodingInCache)
       throws IOException {
-    byte[] dataBlockEncodingType =
-        fileInfo.get(StoreFile.DATA_BLOCK_ENCODING);
+    byte[] dataBlockEncodingType = fileInfo.get(DATA_BLOCK_ENCODING);
     if (dataBlockEncodingType == null) {
       return NoOpDataBlockEncoder.INSTANCE;
     }
@@ -128,10 +126,8 @@ public class HFileDataBlockEncoderImpl implements HFileDataBlockEncoder {
   }

   @Override
-  public void saveMetadata(StoreFile.Writer storeFileWriter)
-      throws IOException {
-    storeFileWriter.appendFileInfo(StoreFile.DATA_BLOCK_ENCODING,
-        onDisk.getNameInBytes());
+  public void saveMetadata(HFile.Writer writer) throws IOException {
+    writer.appendFileInfo(DATA_BLOCK_ENCODING, onDisk.getNameInBytes());
   }

   @Override

org/apache/hadoop/hbase/io/hfile/HFileWriterV1.java

@@ -334,6 +334,8 @@ public class HFileWriterV1 extends AbstractHFileWriter {
     if (this.outputStream == null) {
       return;
     }
+    // Save data block encoder metadata in the file info.
+    blockEncoder.saveMetadata(this);

     // Write out the end of the data blocks, then write meta data blocks.
     // followed by fileinfo, data block index and meta block index.
org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java

@@ -358,6 +358,8 @@ public class HFileWriterV2 extends AbstractHFileWriter {
     if (outputStream == null) {
       return;
     }
+    // Save data block encoder metadata in the file info.
+    blockEncoder.saveMetadata(this);

     // Write out the end of the data blocks, then write meta data blocks.
     // followed by fileinfo, data block index and meta block index.

org/apache/hadoop/hbase/io/hfile/NoOpDataBlockEncoder.java

@@ -26,7 +26,6 @@ import org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext;
 import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext;
 import org.apache.hadoop.hbase.io.encoding.HFileBlockDecodingContext;
 import org.apache.hadoop.hbase.io.hfile.Compression.Algorithm;
-import org.apache.hadoop.hbase.regionserver.StoreFile;

 /**
  * Does not perform any kind of encoding/decoding.
@@ -71,7 +70,7 @@ public class NoOpDataBlockEncoder implements HFileDataBlockEncoder {
   }

   @Override
-  public void saveMetadata(StoreFile.Writer storeFileWriter) {
+  public void saveMetadata(HFile.Writer writer) {
   }

   @Override

org/apache/hadoop/hbase/regionserver/StoreFile.java

@@ -137,10 +137,6 @@ public class StoreFile extends SchemaConfigured {
   /** Key for timestamp of earliest-put in metadata*/
   public static final byte[] EARLIEST_PUT_TS = Bytes.toBytes("EARLIEST_PUT_TS");

-  /** Type of encoding used for data blocks in HFile. Stored in file info. */
-  public static final byte[] DATA_BLOCK_ENCODING =
-      Bytes.toBytes("DATA_BLOCK_ENCODING");
-
   // Make default block size for StoreFiles 8k while testing.  TODO: FIX!
   // Need to make it 8k for testing.
   public static final int DEFAULT_BLOCKSIZE_SMALL = 8 * 1024;
@@ -1208,9 +1204,6 @@ public class StoreFile extends SchemaConfigured {
     }

     public void close() throws IOException {
-      // Save data block encoder metadata in the file info.
-      dataBlockEncoder.saveMetadata(this);
-
       boolean hasGeneralBloom = this.closeGeneralBloomFilter();
       boolean hasDeleteFamilyBloom = this.closeDeleteFamilyBloomFilter();

org/apache/hadoop/hbase/regionserver/TestStoreFile.java

@@ -913,7 +913,7 @@ public class TestStoreFile extends HBaseTestCase {
     StoreFile.Reader reader = storeFile.createReader();
     Map<byte[], byte[]> fileInfo = reader.loadFileInfo();
-    byte[] value = fileInfo.get(StoreFile.DATA_BLOCK_ENCODING);
+    byte[] value = fileInfo.get(HFileDataBlockEncoder.DATA_BLOCK_ENCODING);
     assertEquals(dataBlockEncoderAlgo.getNameInBytes(), value);
   }
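
For context, a minimal sketch of how a reader recovers the recorded encoding after this fix. It is illustrative only, not part of the commit: the EncodingCheck class and the recordedEncoding() helper are hypothetical, and the calls assume the 2012-era trunk reader API (HFile.createReader, loadFileInfo).

import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
import org.apache.hadoop.hbase.util.Bytes;

public class EncodingCheck {
  /** Returns the data block encoding recorded in an HFile's file info. */
  static DataBlockEncoding recordedEncoding(Configuration conf, Path hfile)
      throws Exception {
    FileSystem fs = hfile.getFileSystem(conf);
    HFile.Reader reader = HFile.createReader(fs, hfile, new CacheConfig(conf));
    try {
      Map<byte[], byte[]> fileInfo = reader.loadFileInfo();
      byte[] value = fileInfo.get(HFileDataBlockEncoder.DATA_BLOCK_ENCODING);
      // Before HBASE-6164, bulk-loaded HFiles lacked this key, so readers
      // fell back to NoOpDataBlockEncoder even when blocks were encoded.
      return value == null ? DataBlockEncoding.NONE
          : DataBlockEncoding.valueOf(Bytes.toString(value));
    } finally {
      reader.close();
    }
  }
}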