HBASE-9870 HFileDataBlockEncoderImpl#diskToCacheFormat uses wrong format
git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1541629 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
parent
2a057457d0
commit
fe5865d9e3
|
@ -71,7 +71,7 @@ public class HColumnDescriptor implements WritableComparable<HColumnDescriptor>
|
|||
// These constants are used as FileInfo keys
|
||||
public static final String COMPRESSION = "COMPRESSION";
|
||||
public static final String COMPRESSION_COMPACT = "COMPRESSION_COMPACT";
|
||||
public static final String ENCODE_ON_DISK =
|
||||
public static final String ENCODE_ON_DISK = // To be removed, it is not used anymore
|
||||
"ENCODE_ON_DISK";
|
||||
public static final String DATA_BLOCK_ENCODING =
|
||||
"DATA_BLOCK_ENCODING";
|
||||
|
@ -209,7 +209,6 @@ public class HColumnDescriptor implements WritableComparable<HColumnDescriptor>
|
|||
DEFAULT_VALUES.put(HConstants.IN_MEMORY, String.valueOf(DEFAULT_IN_MEMORY));
|
||||
DEFAULT_VALUES.put(BLOCKCACHE, String.valueOf(DEFAULT_BLOCKCACHE));
|
||||
DEFAULT_VALUES.put(KEEP_DELETED_CELLS, String.valueOf(DEFAULT_KEEP_DELETED));
|
||||
DEFAULT_VALUES.put(ENCODE_ON_DISK, String.valueOf(DEFAULT_ENCODE_ON_DISK));
|
||||
DEFAULT_VALUES.put(DATA_BLOCK_ENCODING, String.valueOf(DEFAULT_DATA_BLOCK_ENCODING));
|
||||
DEFAULT_VALUES.put(CACHE_DATA_ON_WRITE, String.valueOf(DEFAULT_CACHE_DATA_ON_WRITE));
|
||||
DEFAULT_VALUES.put(CACHE_INDEX_ON_WRITE, String.valueOf(DEFAULT_CACHE_INDEX_ON_WRITE));
|
||||
|
@ -421,7 +420,6 @@ public class HColumnDescriptor implements WritableComparable<HColumnDescriptor>
|
|||
setTimeToLive(timeToLive);
|
||||
setCompressionType(Compression.Algorithm.
|
||||
valueOf(compression.toUpperCase()));
|
||||
setEncodeOnDisk(encodeOnDisk);
|
||||
setDataBlockEncoding(DataBlockEncoding.
|
||||
valueOf(dataBlockEncoding.toUpperCase()));
|
||||
setBloomFilterType(BloomType.
|
||||
|
@ -629,29 +627,19 @@ public class HColumnDescriptor implements WritableComparable<HColumnDescriptor>
|
|||
}
|
||||
|
||||
/** @return data block encoding algorithm used on disk */
|
||||
@Deprecated
|
||||
public DataBlockEncoding getDataBlockEncodingOnDisk() {
|
||||
String encodeOnDiskStr = getValue(ENCODE_ON_DISK);
|
||||
boolean encodeOnDisk;
|
||||
if (encodeOnDiskStr == null) {
|
||||
encodeOnDisk = DEFAULT_ENCODE_ON_DISK;
|
||||
} else {
|
||||
encodeOnDisk = Boolean.valueOf(encodeOnDiskStr);
|
||||
}
|
||||
|
||||
if (!encodeOnDisk) {
|
||||
// No encoding on disk.
|
||||
return DataBlockEncoding.NONE;
|
||||
}
|
||||
return getDataBlockEncoding();
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the flag indicating that we only want to encode data block in cache
|
||||
* but not on disk.
|
||||
* This method does nothing now. Flag ENCODE_ON_DISK is not used
|
||||
* any more. Data blocks have the same encoding in cache as on disk.
|
||||
* @return this (for chained invocation)
|
||||
*/
|
||||
@Deprecated
|
||||
public HColumnDescriptor setEncodeOnDisk(boolean encodeOnDisk) {
|
||||
return setValue(ENCODE_ON_DISK, String.valueOf(encodeOnDisk));
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -173,7 +173,8 @@ public enum DataBlockEncoding {
|
|||
}
|
||||
|
||||
DataBlockEncoding algorithm = idToEncoding.get(encoderId);
|
||||
return algorithm.getClass().equals(encoder.getClass());
|
||||
String encoderCls = encoder.getClass().getName();
|
||||
return encoderCls.equals(algorithm.encoderCls);
|
||||
}
|
||||
|
||||
public static DataBlockEncoding getEncodingById(short dataBlockEncodingId) {
|
||||
|
|
|
@ -52,8 +52,7 @@ public class HFileContext implements HeapSize, Cloneable {
|
|||
private int bytesPerChecksum = DEFAULT_BYTES_PER_CHECKSUM;
|
||||
/** Number of uncompressed bytes we allow per block. */
|
||||
private int blocksize = HConstants.DEFAULT_BLOCKSIZE;
|
||||
private DataBlockEncoding encodingOnDisk = DataBlockEncoding.NONE;
|
||||
private DataBlockEncoding encodingInCache = DataBlockEncoding.NONE;
|
||||
private DataBlockEncoding encoding = DataBlockEncoding.NONE;
|
||||
|
||||
//Empty constructor. Go with setters
|
||||
public HFileContext() {
|
||||
|
@ -71,14 +70,12 @@ public class HFileContext implements HeapSize, Cloneable {
|
|||
this.checksumType = context.checksumType;
|
||||
this.bytesPerChecksum = context.bytesPerChecksum;
|
||||
this.blocksize = context.blocksize;
|
||||
this.encodingOnDisk = context.encodingOnDisk;
|
||||
this.encodingInCache = context.encodingInCache;
|
||||
this.encoding = context.encoding;
|
||||
}
|
||||
|
||||
public HFileContext(boolean useHBaseChecksum, boolean includesMvcc, boolean includesTags,
|
||||
Algorithm compressAlgo, boolean compressTags, ChecksumType checksumType,
|
||||
int bytesPerChecksum, int blockSize, DataBlockEncoding encodingOnDisk,
|
||||
DataBlockEncoding encodingInCache) {
|
||||
int bytesPerChecksum, int blockSize, DataBlockEncoding encoding) {
|
||||
this.usesHBaseChecksum = useHBaseChecksum;
|
||||
this.includesMvcc = includesMvcc;
|
||||
this.includesTags = includesTags;
|
||||
|
@ -87,8 +84,9 @@ public class HFileContext implements HeapSize, Cloneable {
|
|||
this.checksumType = checksumType;
|
||||
this.bytesPerChecksum = bytesPerChecksum;
|
||||
this.blocksize = blockSize;
|
||||
this.encodingOnDisk = encodingOnDisk;
|
||||
this.encodingInCache = encodingInCache;
|
||||
if (encoding != null) {
|
||||
this.encoding = encoding;
|
||||
}
|
||||
}
|
||||
|
||||
public Algorithm getCompression() {
|
||||
|
@ -135,12 +133,8 @@ public class HFileContext implements HeapSize, Cloneable {
|
|||
return blocksize;
|
||||
}
|
||||
|
||||
public DataBlockEncoding getEncodingOnDisk() {
|
||||
return encodingOnDisk;
|
||||
}
|
||||
|
||||
public DataBlockEncoding getEncodingInCache() {
|
||||
return encodingInCache;
|
||||
public DataBlockEncoding getDataBlockEncoding() {
|
||||
return encoding;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -151,8 +145,8 @@ public class HFileContext implements HeapSize, Cloneable {
|
|||
@Override
|
||||
public long heapSize() {
|
||||
long size = ClassSize.align(ClassSize.OBJECT +
|
||||
// Algorithm reference, encodingondisk, encodingincache, checksumtype
|
||||
4 * ClassSize.REFERENCE +
|
||||
// Algorithm reference, encoding, checksumtype
|
||||
3 * ClassSize.REFERENCE +
|
||||
2 * Bytes.SIZEOF_INT +
|
||||
// usesHBaseChecksum, includesMvcc, includesTags and compressTags
|
||||
4 * Bytes.SIZEOF_BOOLEAN);
|
||||
|
@ -170,8 +164,7 @@ public class HFileContext implements HeapSize, Cloneable {
|
|||
clonnedCtx.checksumType = this.checksumType;
|
||||
clonnedCtx.bytesPerChecksum = this.bytesPerChecksum;
|
||||
clonnedCtx.blocksize = this.blocksize;
|
||||
clonnedCtx.encodingOnDisk = this.encodingOnDisk;
|
||||
clonnedCtx.encodingInCache = this.encodingInCache;
|
||||
clonnedCtx.encoding = this.encoding;
|
||||
return clonnedCtx;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -47,8 +47,7 @@ public class HFileContextBuilder {
|
|||
private int bytesPerChecksum = DEFAULT_BYTES_PER_CHECKSUM;
|
||||
/** Number of uncompressed bytes we allow per block. */
|
||||
private int blocksize = HConstants.DEFAULT_BLOCKSIZE;
|
||||
private DataBlockEncoding encodingOnDisk = DataBlockEncoding.NONE;
|
||||
private DataBlockEncoding encodingInCache = DataBlockEncoding.NONE;
|
||||
private DataBlockEncoding encoding = DataBlockEncoding.NONE;
|
||||
|
||||
public HFileContextBuilder withHBaseCheckSum(boolean useHBaseCheckSum) {
|
||||
this.usesHBaseChecksum = useHBaseCheckSum;
|
||||
|
@ -90,18 +89,13 @@ public class HFileContextBuilder {
|
|||
return this;
|
||||
}
|
||||
|
||||
public HFileContextBuilder withDataBlockEncodingOnDisk(DataBlockEncoding encodingOnDisk) {
|
||||
this.encodingOnDisk = encodingOnDisk;
|
||||
return this;
|
||||
}
|
||||
|
||||
public HFileContextBuilder withDataBlockEncodingInCache(DataBlockEncoding encodingInCache) {
|
||||
this.encodingInCache = encodingInCache;
|
||||
public HFileContextBuilder withDataBlockEncoding(DataBlockEncoding encoding) {
|
||||
this.encoding = encoding;
|
||||
return this;
|
||||
}
|
||||
|
||||
public HFileContext build() {
|
||||
return new HFileContext(usesHBaseChecksum, includesMvcc, includesTags, compression,
|
||||
compressTags, checksumType, bytesPerChecksum, blocksize, encodingOnDisk, encodingInCache);
|
||||
compressTags, checksumType, bytesPerChecksum, blocksize, encoding);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -43,7 +43,6 @@ import org.apache.hadoop.hbase.filter.ByteArrayComparable;
|
|||
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
|
||||
import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
|
||||
import org.apache.hadoop.hbase.io.Reference;
|
||||
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
|
||||
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
|
||||
import org.apache.hadoop.hbase.regionserver.HRegion;
|
||||
import org.apache.hadoop.hbase.regionserver.InternalScanner;
|
||||
|
@ -58,7 +57,6 @@ import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
|
|||
import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
|
||||
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
|
||||
import org.apache.hadoop.hbase.util.Pair;
|
||||
import org.mortbay.log.Log;
|
||||
|
||||
import com.google.common.collect.ImmutableList;
|
||||
|
||||
|
@ -467,14 +465,14 @@ public abstract class BaseRegionObserver implements RegionObserver {
|
|||
@Override
|
||||
public Reader preStoreFileReaderOpen(ObserverContext<RegionCoprocessorEnvironment> ctx,
|
||||
FileSystem fs, Path p, FSDataInputStreamWrapper in, long size, CacheConfig cacheConf,
|
||||
DataBlockEncoding preferredEncodingInCache, Reference r, Reader reader) throws IOException {
|
||||
Reference r, Reader reader) throws IOException {
|
||||
return reader;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Reader postStoreFileReaderOpen(ObserverContext<RegionCoprocessorEnvironment> ctx,
|
||||
FileSystem fs, Path p, FSDataInputStreamWrapper in, long size, CacheConfig cacheConf,
|
||||
DataBlockEncoding preferredEncodingInCache, Reference r, Reader reader) throws IOException {
|
||||
Reference r, Reader reader) throws IOException {
|
||||
return reader;
|
||||
}
|
||||
|
||||
|
|
|
@ -30,23 +30,23 @@ import org.apache.hadoop.hbase.HRegionInfo;
|
|||
import org.apache.hadoop.hbase.KeyValue;
|
||||
import org.apache.hadoop.hbase.client.Append;
|
||||
import org.apache.hadoop.hbase.client.Delete;
|
||||
import org.apache.hadoop.hbase.client.Durability;
|
||||
import org.apache.hadoop.hbase.client.Get;
|
||||
import org.apache.hadoop.hbase.client.Increment;
|
||||
import org.apache.hadoop.hbase.client.Mutation;
|
||||
import org.apache.hadoop.hbase.client.Put;
|
||||
import org.apache.hadoop.hbase.client.Result;
|
||||
import org.apache.hadoop.hbase.client.Scan;
|
||||
import org.apache.hadoop.hbase.client.Durability;
|
||||
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
|
||||
import org.apache.hadoop.hbase.filter.ByteArrayComparable;
|
||||
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
|
||||
import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
|
||||
import org.apache.hadoop.hbase.io.Reference;
|
||||
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
|
||||
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
|
||||
import org.apache.hadoop.hbase.regionserver.HRegion;
|
||||
import org.apache.hadoop.hbase.regionserver.InternalScanner;
|
||||
import org.apache.hadoop.hbase.regionserver.KeyValueScanner;
|
||||
import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;
|
||||
import org.apache.hadoop.hbase.regionserver.OperationStatus;
|
||||
import org.apache.hadoop.hbase.regionserver.RegionScanner;
|
||||
import org.apache.hadoop.hbase.regionserver.ScanType;
|
||||
import org.apache.hadoop.hbase.regionserver.Store;
|
||||
|
@ -55,9 +55,9 @@ import org.apache.hadoop.hbase.regionserver.StoreFileScanner;
|
|||
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
|
||||
import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
|
||||
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
|
||||
import org.apache.hadoop.hbase.util.Pair;
|
||||
|
||||
import com.google.common.collect.ImmutableList;
|
||||
import org.apache.hadoop.hbase.util.Pair;
|
||||
|
||||
/**
|
||||
* Coprocessors implement this interface to observe and mediate client actions
|
||||
|
@ -1026,7 +1026,6 @@ public interface RegionObserver extends Coprocessor {
|
|||
* @param in {@link FSDataInputStreamWrapper}
|
||||
* @param size Full size of the file
|
||||
* @param cacheConf
|
||||
* @param preferredEncodingInCache
|
||||
* @param r original reference file. This will be not null only when reading a split file.
|
||||
* @param reader the base reader, if not {@code null}, from previous RegionObserver in the chain
|
||||
* @return a Reader instance to use instead of the base reader if overriding
|
||||
|
@ -1035,8 +1034,7 @@ public interface RegionObserver extends Coprocessor {
|
|||
*/
|
||||
StoreFile.Reader preStoreFileReaderOpen(final ObserverContext<RegionCoprocessorEnvironment> ctx,
|
||||
final FileSystem fs, final Path p, final FSDataInputStreamWrapper in, long size,
|
||||
final CacheConfig cacheConf, final DataBlockEncoding preferredEncodingInCache,
|
||||
final Reference r, StoreFile.Reader reader) throws IOException;
|
||||
final CacheConfig cacheConf, final Reference r, StoreFile.Reader reader) throws IOException;
|
||||
|
||||
/**
|
||||
* Called after the creation of Reader for a store file.
|
||||
|
@ -1047,7 +1045,6 @@ public interface RegionObserver extends Coprocessor {
|
|||
* @param in {@link FSDataInputStreamWrapper}
|
||||
* @param size Full size of the file
|
||||
* @param cacheConf
|
||||
* @param preferredEncodingInCache
|
||||
* @param r original reference file. This will be not null only when reading a split file.
|
||||
* @param reader the base reader instance
|
||||
* @return The reader to use
|
||||
|
@ -1055,8 +1052,7 @@ public interface RegionObserver extends Coprocessor {
|
|||
*/
|
||||
StoreFile.Reader postStoreFileReaderOpen(final ObserverContext<RegionCoprocessorEnvironment> ctx,
|
||||
final FileSystem fs, final Path p, final FSDataInputStreamWrapper in, long size,
|
||||
final CacheConfig cacheConf, final DataBlockEncoding preferredEncodingInCache,
|
||||
final Reference r, StoreFile.Reader reader) throws IOException;
|
||||
final CacheConfig cacheConf, final Reference r, StoreFile.Reader reader) throws IOException;
|
||||
|
||||
/**
|
||||
* Called after a new cell has been created during an increment operation, but before
|
||||
|
|
|
@ -24,13 +24,11 @@ import java.nio.ByteBuffer;
|
|||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.fs.FSDataInputStream;
|
||||
import org.apache.hadoop.fs.FileSystem;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.hbase.HConstants;
|
||||
import org.apache.hadoop.hbase.KeyValue;
|
||||
import org.apache.hadoop.hbase.client.Scan;
|
||||
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
|
||||
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
|
||||
import org.apache.hadoop.hbase.io.hfile.HFileScanner;
|
||||
import org.apache.hadoop.hbase.regionserver.StoreFile;
|
||||
|
@ -67,13 +65,11 @@ public class HalfStoreFileReader extends StoreFile.Reader {
|
|||
* @param p path to hfile
|
||||
* @param cacheConf
|
||||
* @param r original reference file (contains top or bottom)
|
||||
* @param preferredEncodingInCache
|
||||
* @throws IOException
|
||||
*/
|
||||
public HalfStoreFileReader(final FileSystem fs, final Path p,
|
||||
final CacheConfig cacheConf, final Reference r,
|
||||
DataBlockEncoding preferredEncodingInCache) throws IOException {
|
||||
super(fs, p, cacheConf, preferredEncodingInCache);
|
||||
final CacheConfig cacheConf, final Reference r) throws IOException {
|
||||
super(fs, p, cacheConf);
|
||||
// This is not actual midkey for this half-file; its just border
|
||||
// around which we split top and bottom. Have to look in files to find
|
||||
// actual last and first keys for bottom and top halves. Half-files don't
|
||||
|
@ -92,13 +88,11 @@ public class HalfStoreFileReader extends StoreFile.Reader {
|
|||
* @param size Full size of the hfile file
|
||||
* @param cacheConf
|
||||
* @param r original reference file (contains top or bottom)
|
||||
* @param preferredEncodingInCache
|
||||
* @throws IOException
|
||||
*/
|
||||
public HalfStoreFileReader(final FileSystem fs, final Path p, final FSDataInputStreamWrapper in,
|
||||
long size, final CacheConfig cacheConf, final Reference r,
|
||||
final DataBlockEncoding preferredEncodingInCache) throws IOException {
|
||||
super(fs, p, in, size, cacheConf, preferredEncodingInCache);
|
||||
long size, final CacheConfig cacheConf, final Reference r) throws IOException {
|
||||
super(fs, p, in, size, cacheConf);
|
||||
// This is not actual midkey for this half-file; its just border
|
||||
// around which we split top and bottom. Have to look in files to find
|
||||
// actual last and first keys for bottom and top halves. Half-files don't
|
||||
|
|
|
@ -325,8 +325,8 @@ public abstract class AbstractHFileReader implements HFile.Reader {
|
|||
}
|
||||
|
||||
@Override
|
||||
public DataBlockEncoding getEncodingOnDisk() {
|
||||
return dataBlockEncoder.getEncodingOnDisk();
|
||||
public DataBlockEncoding getDataBlockEncoding() {
|
||||
return dataBlockEncoder.getDataBlockEncoding();
|
||||
}
|
||||
|
||||
public abstract int getMajorVersion();
|
||||
|
|
|
@ -114,10 +114,9 @@ public abstract class AbstractHFileWriter implements HFile.Writer {
|
|||
this.path = path;
|
||||
this.name = path != null ? path.getName() : outputStream.toString();
|
||||
this.hFileContext = fileContext;
|
||||
if (hFileContext.getEncodingOnDisk() != DataBlockEncoding.NONE
|
||||
|| hFileContext.getEncodingInCache() != DataBlockEncoding.NONE) {
|
||||
this.blockEncoder = new HFileDataBlockEncoderImpl(hFileContext.getEncodingOnDisk(),
|
||||
hFileContext.getEncodingInCache());
|
||||
DataBlockEncoding encoding = hFileContext.getDataBlockEncoding();
|
||||
if (encoding != DataBlockEncoding.NONE) {
|
||||
this.blockEncoder = new HFileDataBlockEncoderImpl(encoding);
|
||||
} else {
|
||||
this.blockEncoder = NoOpDataBlockEncoder.INSTANCE;
|
||||
}
|
||||
|
|
|
@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.util.ClassSize;
|
|||
*/
|
||||
@InterfaceAudience.Private
|
||||
public class BlockCacheKey implements HeapSize, java.io.Serializable {
|
||||
private static final long serialVersionUID = -5199992013113130534L;
|
||||
private final String hfileName;
|
||||
private final long offset;
|
||||
private final DataBlockEncoding encoding;
|
||||
|
@ -39,8 +40,8 @@ public class BlockCacheKey implements HeapSize, java.io.Serializable {
|
|||
// We add encoding to the cache key only for data blocks. If the block type
|
||||
// is unknown (this should never be the case in production), we just use
|
||||
// the provided encoding, because it might be a data block.
|
||||
this.encoding = (blockType == null || blockType.isData()) ? encoding :
|
||||
DataBlockEncoding.NONE;
|
||||
this.encoding = (encoding != null && (blockType == null
|
||||
|| blockType.isData())) ? encoding : DataBlockEncoding.NONE;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -62,7 +63,7 @@ public class BlockCacheKey implements HeapSize, java.io.Serializable {
|
|||
public boolean equals(Object o) {
|
||||
if (o instanceof BlockCacheKey) {
|
||||
BlockCacheKey k = (BlockCacheKey) o;
|
||||
return offset == k.offset
|
||||
return offset == k.offset && encoding == k.encoding
|
||||
&& (hfileName == null ? k.hfileName == null : hfileName
|
||||
.equals(k.hfileName));
|
||||
} else {
|
||||
|
|
|
@ -66,11 +66,10 @@ import org.apache.hadoop.hbase.util.Bytes;
|
|||
import org.apache.hadoop.hbase.util.ChecksumType;
|
||||
import org.apache.hadoop.hbase.util.FSUtils;
|
||||
import org.apache.hadoop.io.Writable;
|
||||
import com.google.protobuf.ZeroCopyLiteralByteString;
|
||||
|
||||
import com.google.common.base.Preconditions;
|
||||
import com.google.common.collect.Lists;
|
||||
import com.google.protobuf.ByteString;
|
||||
import com.google.protobuf.ZeroCopyLiteralByteString;
|
||||
|
||||
/**
|
||||
* File format for hbase.
|
||||
|
@ -497,7 +496,7 @@ public class HFile {
|
|||
/** Close method with optional evictOnClose */
|
||||
void close(boolean evictOnClose) throws IOException;
|
||||
|
||||
DataBlockEncoding getEncodingOnDisk();
|
||||
DataBlockEncoding getDataBlockEncoding();
|
||||
|
||||
boolean hasMVCCInfo();
|
||||
}
|
||||
|
@ -510,13 +509,12 @@ public class HFile {
|
|||
* @param fsdis stream of path's file
|
||||
* @param size max size of the trailer.
|
||||
* @param cacheConf Cache configuation values, cannot be null.
|
||||
* @param preferredEncodingInCache
|
||||
* @param hfs
|
||||
* @return an appropriate instance of HFileReader
|
||||
* @throws IOException If file is invalid, will throw CorruptHFileException flavored IOException
|
||||
*/
|
||||
private static Reader pickReaderVersion(Path path, FSDataInputStreamWrapper fsdis,
|
||||
long size, CacheConfig cacheConf, DataBlockEncoding preferredEncodingInCache,
|
||||
long size, CacheConfig cacheConf,
|
||||
HFileSystem hfs) throws IOException {
|
||||
FixedFileTrailer trailer = null;
|
||||
try {
|
||||
|
@ -526,10 +524,10 @@ public class HFile {
|
|||
switch (trailer.getMajorVersion()) {
|
||||
case 2:
|
||||
return new HFileReaderV2(
|
||||
path, trailer, fsdis, size, cacheConf, preferredEncodingInCache, hfs);
|
||||
path, trailer, fsdis, size, cacheConf, hfs);
|
||||
case 3 :
|
||||
return new HFileReaderV3(
|
||||
path, trailer, fsdis, size, cacheConf, preferredEncodingInCache, hfs);
|
||||
path, trailer, fsdis, size, cacheConf, hfs);
|
||||
default:
|
||||
throw new IllegalArgumentException("Invalid HFile version " + trailer.getMajorVersion());
|
||||
}
|
||||
|
@ -543,35 +541,17 @@ public class HFile {
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @param fs A file system
|
||||
* @param path Path to HFile
|
||||
* @param cacheConf Cache configuration for hfile's contents
|
||||
* @param preferredEncodingInCache Preferred in-cache data encoding algorithm.
|
||||
* @return A version specific Hfile Reader
|
||||
* @throws IOException If file is invalid, will throw CorruptHFileException flavored IOException
|
||||
*/
|
||||
public static Reader createReaderWithEncoding(
|
||||
FileSystem fs, Path path, CacheConfig cacheConf,
|
||||
DataBlockEncoding preferredEncodingInCache) throws IOException {
|
||||
FSDataInputStreamWrapper stream = new FSDataInputStreamWrapper(fs, path);
|
||||
return pickReaderVersion(path, stream, fs.getFileStatus(path).getLen(),
|
||||
cacheConf, preferredEncodingInCache, stream.getHfs());
|
||||
}
|
||||
|
||||
/**
|
||||
* @param fs A file system
|
||||
* @param path Path to HFile
|
||||
* @param fsdis a stream of path's file
|
||||
* @param size max size of the trailer.
|
||||
* @param cacheConf Cache configuration for hfile's contents
|
||||
* @param preferredEncodingInCache Preferred in-cache data encoding algorithm.
|
||||
* @return A version specific Hfile Reader
|
||||
* @throws IOException If file is invalid, will throw CorruptHFileException flavored IOException
|
||||
*/
|
||||
public static Reader createReaderWithEncoding(FileSystem fs, Path path,
|
||||
FSDataInputStreamWrapper fsdis, long size, CacheConfig cacheConf,
|
||||
DataBlockEncoding preferredEncodingInCache) throws IOException {
|
||||
public static Reader createReader(FileSystem fs, Path path,
|
||||
FSDataInputStreamWrapper fsdis, long size, CacheConfig cacheConf) throws IOException {
|
||||
HFileSystem hfs = null;
|
||||
|
||||
// If the fs is not an instance of HFileSystem, then create an
|
||||
|
@ -583,7 +563,7 @@ public class HFile {
|
|||
} else {
|
||||
hfs = (HFileSystem)fs;
|
||||
}
|
||||
return pickReaderVersion(path, fsdis, size, cacheConf, preferredEncodingInCache, hfs);
|
||||
return pickReaderVersion(path, fsdis, size, cacheConf, hfs);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -597,8 +577,9 @@ public class HFile {
|
|||
public static Reader createReader(
|
||||
FileSystem fs, Path path, CacheConfig cacheConf) throws IOException {
|
||||
Preconditions.checkNotNull(cacheConf, "Cannot create Reader with null CacheConf");
|
||||
return createReaderWithEncoding(fs, path, cacheConf,
|
||||
DataBlockEncoding.NONE);
|
||||
FSDataInputStreamWrapper stream = new FSDataInputStreamWrapper(fs, path);
|
||||
return pickReaderVersion(path, stream, fs.getFileStatus(path).getLen(),
|
||||
cacheConf, stream.getHfs());
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -608,7 +589,7 @@ public class HFile {
|
|||
FSDataInputStream fsdis, long size, CacheConfig cacheConf)
|
||||
throws IOException {
|
||||
FSDataInputStreamWrapper wrapper = new FSDataInputStreamWrapper(fsdis);
|
||||
return pickReaderVersion(path, wrapper, size, cacheConf, DataBlockEncoding.NONE, null);
|
||||
return pickReaderVersion(path, wrapper, size, cacheConf, null);
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -689,7 +689,7 @@ public class HFileBlock implements Cacheable {
|
|||
defaultBlockEncodingCtx = new HFileBlockDefaultEncodingContext(null,
|
||||
HConstants.HFILEBLOCK_DUMMY_HEADER, fileContext);
|
||||
dataBlockEncodingCtx = this.dataBlockEncoder
|
||||
.newOnDiskDataBlockEncodingContext(HConstants.HFILEBLOCK_DUMMY_HEADER, fileContext);
|
||||
.newDataBlockEncodingContext(HConstants.HFILEBLOCK_DUMMY_HEADER, fileContext);
|
||||
|
||||
if (fileContext.getBytesPerChecksum() < HConstants.HFILEBLOCK_HEADER_SIZE) {
|
||||
throw new RuntimeException("Unsupported value of bytesPerChecksum. " +
|
||||
|
@ -1025,8 +1025,7 @@ public class HFileBlock implements Cacheable {
|
|||
.withBytesPerCheckSum(0)
|
||||
.withChecksumType(ChecksumType.NULL) // no checksums in cached data
|
||||
.withCompression(fileContext.getCompression())
|
||||
.withDataBlockEncodingInCache(fileContext.getEncodingInCache())
|
||||
.withDataBlockEncodingOnDisk(fileContext.getEncodingOnDisk())
|
||||
.withDataBlockEncoding(fileContext.getDataBlockEncoding())
|
||||
.withHBaseCheckSum(fileContext.isUseHBaseChecksum())
|
||||
.withCompressTags(fileContext.isCompressTags())
|
||||
.withIncludesMvcc(fileContext.isIncludesMvcc())
|
||||
|
@ -1256,10 +1255,6 @@ public class HFileBlock implements Cacheable {
|
|||
* does or doesn't do checksum validations in the filesystem */
|
||||
protected FSDataInputStreamWrapper streamWrapper;
|
||||
|
||||
/** Data block encoding used to read from file */
|
||||
protected HFileDataBlockEncoder dataBlockEncoder =
|
||||
NoOpDataBlockEncoder.INSTANCE;
|
||||
|
||||
private HFileBlockDecodingContext encodedBlockDecodingCtx;
|
||||
|
||||
private HFileBlockDefaultDecodingContext defaultDecodingCtx;
|
||||
|
@ -1512,7 +1507,7 @@ public class HFileBlock implements Cacheable {
|
|||
if (isCompressed) {
|
||||
// This will allocate a new buffer but keep header bytes.
|
||||
b.allocateBuffer(nextBlockOnDiskSize > 0);
|
||||
if (b.blockType.equals(BlockType.ENCODED_DATA)) {
|
||||
if (b.blockType == BlockType.ENCODED_DATA) {
|
||||
encodedBlockDecodingCtx.prepareDecoding(b.getOnDiskSizeWithoutHeader(),
|
||||
b.getUncompressedSizeWithoutHeader(), b.getBufferWithoutHeader(), onDiskBlock,
|
||||
hdrSize);
|
||||
|
@ -1557,8 +1552,7 @@ public class HFileBlock implements Cacheable {
|
|||
}
|
||||
|
||||
void setDataBlockEncoder(HFileDataBlockEncoder encoder) {
|
||||
this.dataBlockEncoder = encoder;
|
||||
encodedBlockDecodingCtx = encoder.newOnDiskDataBlockDecodingContext(this.fileContext);
|
||||
encodedBlockDecodingCtx = encoder.newDataBlockDecodingContext(this.fileContext);
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -112,7 +112,6 @@ public class HFileBlockIndex {
|
|||
private byte[][] blockKeys;
|
||||
private long[] blockOffsets;
|
||||
private int[] blockDataSizes;
|
||||
private int rootByteSize = 0;
|
||||
private int rootCount = 0;
|
||||
|
||||
// Mid-key metadata.
|
||||
|
@ -262,8 +261,7 @@ public class HFileBlockIndex {
|
|||
}
|
||||
|
||||
// Found a data block, break the loop and check our level in the tree.
|
||||
if (block.getBlockType().equals(BlockType.DATA) ||
|
||||
block.getBlockType().equals(BlockType.ENCODED_DATA)) {
|
||||
if (block.getBlockType().isData()) {
|
||||
break;
|
||||
}
|
||||
|
||||
|
@ -423,9 +421,7 @@ public class HFileBlockIndex {
|
|||
blockOffsets[rootCount] = offset;
|
||||
blockKeys[rootCount] = key;
|
||||
blockDataSizes[rootCount] = dataSize;
|
||||
|
||||
rootCount++;
|
||||
rootByteSize += SECONDARY_INDEX_ENTRY_OVERHEAD + key.length;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -672,7 +668,7 @@ public class HFileBlockIndex {
|
|||
@Override
|
||||
public long heapSize() {
|
||||
long heapSize = ClassSize.align(6 * ClassSize.REFERENCE +
|
||||
3 * Bytes.SIZEOF_INT + ClassSize.OBJECT);
|
||||
2 * Bytes.SIZEOF_INT + ClassSize.OBJECT);
|
||||
|
||||
// Mid-key metadata.
|
||||
heapSize += MID_KEY_METADATA_SIZE;
|
||||
|
|
|
@ -34,24 +34,6 @@ import org.apache.hadoop.hbase.util.Bytes;
|
|||
public interface HFileDataBlockEncoder {
|
||||
/** Type of encoding used for data blocks in HFile. Stored in file info. */
|
||||
byte[] DATA_BLOCK_ENCODING = Bytes.toBytes("DATA_BLOCK_ENCODING");
|
||||
|
||||
/**
|
||||
* Converts a block from the on-disk format to the in-cache format. Called in
|
||||
* the following cases:
|
||||
* <ul>
|
||||
* <li>After an encoded or unencoded data block is read from disk, but before
|
||||
* it is put into the cache.</li>
|
||||
* <li>To convert brand-new blocks to the in-cache format when doing
|
||||
* cache-on-write.</li>
|
||||
* </ul>
|
||||
* @param block a block in an on-disk format (read from HFile or freshly
|
||||
* generated).
|
||||
* @param isCompaction
|
||||
* @return non null block which is coded according to the settings.
|
||||
*/
|
||||
HFileBlock diskToCacheFormat(
|
||||
HFileBlock block, boolean isCompaction
|
||||
);
|
||||
|
||||
/**
|
||||
* Should be called before an encoded or unencoded data block is written to
|
||||
|
@ -69,10 +51,9 @@ public interface HFileDataBlockEncoder {
|
|||
|
||||
/**
|
||||
* Decides whether we should use a scanner over encoded blocks.
|
||||
* @param isCompaction whether we are in a compaction.
|
||||
* @return Whether to use encoded scanner.
|
||||
*/
|
||||
boolean useEncodedScanner(boolean isCompaction);
|
||||
boolean useEncodedScanner();
|
||||
|
||||
/**
|
||||
* Save metadata in HFile which will be written to disk
|
||||
|
@ -82,17 +63,8 @@ public interface HFileDataBlockEncoder {
|
|||
void saveMetadata(HFile.Writer writer)
|
||||
throws IOException;
|
||||
|
||||
/** @return the on-disk data block encoding */
|
||||
DataBlockEncoding getEncodingOnDisk();
|
||||
|
||||
/** @return the preferred in-cache data block encoding for normal reads */
|
||||
DataBlockEncoding getEncodingInCache();
|
||||
|
||||
/**
|
||||
* @return the effective in-cache data block encoding, taking into account
|
||||
* whether we are doing a compaction.
|
||||
*/
|
||||
DataBlockEncoding getEffectiveEncodingInCache(boolean isCompaction);
|
||||
/** @return the data block encoding */
|
||||
DataBlockEncoding getDataBlockEncoding();
|
||||
|
||||
/**
|
||||
* Create an encoder specific encoding context object for writing. And the
|
||||
|
@ -103,7 +75,7 @@ public interface HFileDataBlockEncoder {
|
|||
* @param fileContext HFile meta data
|
||||
* @return a new {@link HFileBlockEncodingContext} object
|
||||
*/
|
||||
HFileBlockEncodingContext newOnDiskDataBlockEncodingContext(byte[] headerBytes,
|
||||
HFileBlockEncodingContext newDataBlockEncodingContext(byte[] headerBytes,
|
||||
HFileContext fileContext);
|
||||
|
||||
/**
|
||||
|
@ -114,6 +86,5 @@ public interface HFileDataBlockEncoder {
|
|||
* @param fileContext - HFile meta data
|
||||
* @return a new {@link HFileBlockDecodingContext} object
|
||||
*/
|
||||
HFileBlockDecodingContext newOnDiskDataBlockDecodingContext(HFileContext fileContext);
|
||||
|
||||
HFileBlockDecodingContext newDataBlockDecodingContext(HFileContext fileContext);
|
||||
}
|
||||
|
|
|
@ -20,7 +20,6 @@ import java.io.IOException;
|
|||
import java.nio.ByteBuffer;
|
||||
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.hbase.HConstants;
|
||||
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoder;
|
||||
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
|
||||
import org.apache.hadoop.hbase.io.encoding.HFileBlockDecodingContext;
|
||||
|
@ -30,147 +29,50 @@ import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext;
|
|||
import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo;
|
||||
import org.apache.hadoop.hbase.util.Bytes;
|
||||
|
||||
import com.google.common.base.Preconditions;
|
||||
|
||||
/**
|
||||
* Do different kinds of data block encoding according to column family
|
||||
* options.
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
public class HFileDataBlockEncoderImpl implements HFileDataBlockEncoder {
|
||||
private final DataBlockEncoding onDisk;
|
||||
private final DataBlockEncoding inCache;
|
||||
private final byte[] dummyHeader;
|
||||
private final DataBlockEncoding encoding;
|
||||
|
||||
/**
|
||||
* Do data block encoding with specified options.
|
||||
* @param encoding What kind of data block encoding will be used.
|
||||
*/
|
||||
public HFileDataBlockEncoderImpl(DataBlockEncoding encoding) {
|
||||
this(encoding, encoding);
|
||||
}
|
||||
|
||||
/**
|
||||
* Do data block encoding with specified options.
|
||||
* @param onDisk What kind of data block encoding will be used before writing
|
||||
* HFileBlock to disk. This must be either the same as inCache or
|
||||
* {@link DataBlockEncoding#NONE}.
|
||||
* @param inCache What kind of data block encoding will be used in block
|
||||
* cache.
|
||||
*/
|
||||
public HFileDataBlockEncoderImpl(DataBlockEncoding onDisk,
|
||||
DataBlockEncoding inCache) {
|
||||
this(onDisk, inCache, HConstants.HFILEBLOCK_DUMMY_HEADER);
|
||||
}
|
||||
|
||||
/**
|
||||
* Do data block encoding with specified options.
|
||||
* @param onDisk What kind of data block encoding will be used before writing
|
||||
* HFileBlock to disk. This must be either the same as inCache or
|
||||
* {@link DataBlockEncoding#NONE}.
|
||||
* @param inCache What kind of data block encoding will be used in block
|
||||
* cache.
|
||||
* @param dummyHeader dummy header bytes
|
||||
*/
|
||||
public HFileDataBlockEncoderImpl(DataBlockEncoding onDisk,
|
||||
DataBlockEncoding inCache, byte[] dummyHeader) {
|
||||
this.onDisk = onDisk != null ?
|
||||
onDisk : DataBlockEncoding.NONE;
|
||||
this.inCache = inCache != null ?
|
||||
inCache : DataBlockEncoding.NONE;
|
||||
this.dummyHeader = dummyHeader;
|
||||
|
||||
Preconditions.checkArgument(onDisk == DataBlockEncoding.NONE ||
|
||||
onDisk == inCache, "on-disk encoding (" + onDisk + ") must be " +
|
||||
"either the same as in-cache encoding (" + inCache + ") or " +
|
||||
DataBlockEncoding.NONE);
|
||||
this.encoding = encoding != null ? encoding : DataBlockEncoding.NONE;
|
||||
}
|
||||
|
||||
public static HFileDataBlockEncoder createFromFileInfo(
|
||||
FileInfo fileInfo, DataBlockEncoding preferredEncodingInCache)
|
||||
throws IOException {
|
||||
boolean hasPreferredCacheEncoding = preferredEncodingInCache != null
|
||||
&& preferredEncodingInCache != DataBlockEncoding.NONE;
|
||||
|
||||
FileInfo fileInfo) throws IOException {
|
||||
DataBlockEncoding encoding = DataBlockEncoding.NONE;
|
||||
byte[] dataBlockEncodingType = fileInfo.get(DATA_BLOCK_ENCODING);
|
||||
if (dataBlockEncodingType == null && !hasPreferredCacheEncoding) {
|
||||
return NoOpDataBlockEncoder.INSTANCE;
|
||||
}
|
||||
|
||||
DataBlockEncoding onDisk;
|
||||
if (dataBlockEncodingType == null) {
|
||||
onDisk = DataBlockEncoding.NONE;
|
||||
} else {
|
||||
if (dataBlockEncodingType != null) {
|
||||
String dataBlockEncodingStr = Bytes.toString(dataBlockEncodingType);
|
||||
try {
|
||||
onDisk = DataBlockEncoding.valueOf(dataBlockEncodingStr);
|
||||
encoding = DataBlockEncoding.valueOf(dataBlockEncodingStr);
|
||||
} catch (IllegalArgumentException ex) {
|
||||
throw new IOException("Invalid data block encoding type in file info: "
|
||||
+ dataBlockEncodingStr, ex);
|
||||
+ dataBlockEncodingStr, ex);
|
||||
}
|
||||
}
|
||||
|
||||
DataBlockEncoding inCache;
|
||||
if (onDisk == DataBlockEncoding.NONE) {
|
||||
// This is an "in-cache-only" encoding or fully-unencoded scenario.
|
||||
// Either way, we use the given encoding (possibly NONE) specified by
|
||||
// the column family in cache.
|
||||
inCache = preferredEncodingInCache;
|
||||
} else {
|
||||
// Leave blocks in cache encoded the same way as they are on disk.
|
||||
// If we switch encoding type for the CF or the in-cache-only encoding
|
||||
// flag, old files will keep their encoding both on disk and in cache,
|
||||
// but new files will be generated with the new encoding.
|
||||
inCache = onDisk;
|
||||
if (encoding == DataBlockEncoding.NONE) {
|
||||
return NoOpDataBlockEncoder.INSTANCE;
|
||||
}
|
||||
// TODO: we are not passing proper header size here based on minor version, presumably
|
||||
// because this encoder will never actually be used for encoding.
|
||||
return new HFileDataBlockEncoderImpl(onDisk, inCache);
|
||||
return new HFileDataBlockEncoderImpl(encoding);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void saveMetadata(HFile.Writer writer) throws IOException {
|
||||
writer.appendFileInfo(DATA_BLOCK_ENCODING, onDisk.getNameInBytes());
|
||||
writer.appendFileInfo(DATA_BLOCK_ENCODING, encoding.getNameInBytes());
|
||||
}
|
||||
|
||||
@Override
|
||||
public DataBlockEncoding getEncodingOnDisk() {
|
||||
return onDisk;
|
||||
}
|
||||
|
||||
@Override
|
||||
public DataBlockEncoding getEncodingInCache() {
|
||||
return inCache;
|
||||
}
|
||||
|
||||
@Override
|
||||
public DataBlockEncoding getEffectiveEncodingInCache(boolean isCompaction) {
|
||||
if (!useEncodedScanner(isCompaction)) {
|
||||
return DataBlockEncoding.NONE;
|
||||
}
|
||||
return inCache;
|
||||
}
|
||||
|
||||
@Override
|
||||
public HFileBlock diskToCacheFormat(HFileBlock block, boolean isCompaction) {
|
||||
if (block.getBlockType() == BlockType.DATA) {
|
||||
if (!useEncodedScanner(isCompaction)) {
|
||||
// Unencoded block, and we don't want to encode in cache.
|
||||
return block;
|
||||
}
|
||||
// Encode the unencoded block with the in-cache encoding.
|
||||
return encodeDataBlock(block, inCache,
|
||||
createInCacheEncodingContext(block.getHFileContext()));
|
||||
}
|
||||
|
||||
if (block.getBlockType() == BlockType.ENCODED_DATA) {
|
||||
if (block.getDataBlockEncodingId() == onDisk.getId()) {
|
||||
// The block is already in the desired in-cache encoding.
|
||||
return block;
|
||||
}
|
||||
// We don't want to re-encode a block in a different encoding. The HFile
|
||||
// reader should have been instantiated in such a way that we would not
|
||||
// have to do this.
|
||||
throw new AssertionError("Expected on-disk data block encoding " +
|
||||
onDisk + ", got " + block.getDataBlockEncoding());
|
||||
}
|
||||
return block;
|
||||
public DataBlockEncoding getDataBlockEncoding() {
|
||||
return encoding;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -184,21 +86,18 @@ public class HFileDataBlockEncoderImpl implements HFileDataBlockEncoder {
|
|||
public void beforeWriteToDisk(ByteBuffer in,
|
||||
HFileBlockEncodingContext encodeCtx,
|
||||
BlockType blockType) throws IOException {
|
||||
if (onDisk == DataBlockEncoding.NONE) {
|
||||
if (encoding == DataBlockEncoding.NONE) {
|
||||
// there is no need to encode the block before writing it to disk
|
||||
((HFileBlockDefaultEncodingContext) encodeCtx).compressAfterEncodingWithBlockType(
|
||||
in.array(), blockType);
|
||||
return;
|
||||
}
|
||||
encodeBufferToHFileBlockBuffer(in, onDisk, encodeCtx);
|
||||
encodeBufferToHFileBlockBuffer(in, encoding, encodeCtx);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean useEncodedScanner(boolean isCompaction) {
|
||||
if (isCompaction && onDisk == DataBlockEncoding.NONE) {
|
||||
return false;
|
||||
}
|
||||
return inCache != DataBlockEncoding.NONE;
|
||||
public boolean useEncodedScanner() {
|
||||
return encoding != DataBlockEncoding.NONE;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -222,66 +121,27 @@ public class HFileDataBlockEncoderImpl implements HFileDataBlockEncoder {
|
|||
}
|
||||
}
|
||||
|
||||
private HFileBlock encodeDataBlock(HFileBlock block, DataBlockEncoding algo,
|
||||
HFileBlockEncodingContext encodingCtx) {
|
||||
encodingCtx.setDummyHeader(block.getDummyHeaderForVersion());
|
||||
encodeBufferToHFileBlockBuffer(
|
||||
block.getBufferWithoutHeader(), algo, encodingCtx);
|
||||
byte[] encodedUncompressedBytes =
|
||||
encodingCtx.getUncompressedBytesWithHeader();
|
||||
ByteBuffer bufferWrapper = ByteBuffer.wrap(encodedUncompressedBytes);
|
||||
int sizeWithoutHeader = bufferWrapper.limit() - block.headerSize();
|
||||
HFileBlock encodedBlock = new HFileBlock(BlockType.ENCODED_DATA,
|
||||
block.getOnDiskSizeWithoutHeader(),
|
||||
sizeWithoutHeader, block.getPrevBlockOffset(),
|
||||
bufferWrapper, HFileBlock.FILL_HEADER, block.getOffset(),
|
||||
block.getOnDiskDataSizeWithHeader(), encodingCtx.getHFileContext());
|
||||
return encodedBlock;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a new encoding context given the inCache encoding scheme provided in the constructor.
|
||||
* This used to be kept around but HFileBlockDefaultEncodingContext isn't thread-safe.
|
||||
* See HBASE-8732
|
||||
* @return a new in cache encoding context
|
||||
*/
|
||||
private HFileBlockEncodingContext createInCacheEncodingContext(HFileContext fileContext) {
|
||||
HFileContext newContext = new HFileContext(fileContext);
|
||||
return (inCache != DataBlockEncoding.NONE) ?
|
||||
this.inCache.getEncoder().newDataBlockEncodingContext(
|
||||
this.inCache, dummyHeader, newContext)
|
||||
:
|
||||
// create a default encoding context
|
||||
new HFileBlockDefaultEncodingContext(this.inCache, dummyHeader, newContext);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return getClass().getSimpleName() + "(onDisk=" + onDisk + ", inCache=" +
|
||||
inCache + ")";
|
||||
return getClass().getSimpleName() + "(encoding=" + encoding + ")";
|
||||
}
|
||||
|
||||
@Override
|
||||
public HFileBlockEncodingContext newOnDiskDataBlockEncodingContext(
|
||||
public HFileBlockEncodingContext newDataBlockEncodingContext(
|
||||
byte[] dummyHeader, HFileContext fileContext) {
|
||||
if (onDisk != null) {
|
||||
DataBlockEncoder encoder = onDisk.getEncoder();
|
||||
if (encoder != null) {
|
||||
return encoder.newDataBlockEncodingContext(onDisk, dummyHeader, fileContext);
|
||||
}
|
||||
DataBlockEncoder encoder = encoding.getEncoder();
|
||||
if (encoder != null) {
|
||||
return encoder.newDataBlockEncodingContext(encoding, dummyHeader, fileContext);
|
||||
}
|
||||
return new HFileBlockDefaultEncodingContext(null, dummyHeader, fileContext);
|
||||
}
|
||||
|
||||
@Override
|
||||
public HFileBlockDecodingContext newOnDiskDataBlockDecodingContext(HFileContext fileContext) {
|
||||
if (onDisk != null) {
|
||||
DataBlockEncoder encoder = onDisk.getEncoder();
|
||||
if (encoder != null) {
|
||||
return encoder.newDataBlockDecodingContext(fileContext);
|
||||
}
|
||||
public HFileBlockDecodingContext newDataBlockDecodingContext(HFileContext fileContext) {
|
||||
DataBlockEncoder encoder = encoding.getEncoder();
|
||||
if (encoder != null) {
|
||||
return encoder.newDataBlockDecodingContext(fileContext);
|
||||
}
|
||||
return new HFileBlockDefaultDecodingContext(fileContext);
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -109,20 +109,16 @@ public class HFileReaderV2 extends AbstractHFileReader {
|
|||
* @param fsdis input stream.
|
||||
* @param size Length of the stream.
|
||||
* @param cacheConf Cache configuration.
|
||||
* @param preferredEncodingInCache the encoding to use in cache in case we
|
||||
* have a choice. If the file is already encoded on disk, we will
|
||||
* still use its on-disk encoding in cache.
|
||||
* @param hfs
|
||||
*/
|
||||
public HFileReaderV2(Path path, FixedFileTrailer trailer,
|
||||
final FSDataInputStreamWrapper fsdis, final long size, final CacheConfig cacheConf,
|
||||
DataBlockEncoding preferredEncodingInCache, final HFileSystem hfs)
|
||||
final HFileSystem hfs)
|
||||
throws IOException {
|
||||
super(path, trailer, size, cacheConf, hfs);
|
||||
trailer.expectMajorVersion(getMajorVersion());
|
||||
validateMinorVersion(path, trailer.getMinorVersion());
|
||||
this.hfileContext = createHFileContext(trailer);
|
||||
// Should we set the preferredEncodinginCache here for the context
|
||||
HFileBlock.FSReaderV2 fsBlockReaderV2 = new HFileBlock.FSReaderV2(fsdis, fileSize, hfs, path,
|
||||
hfileContext);
|
||||
this.fsBlockReader = fsBlockReaderV2; // upcast
|
||||
|
@ -168,8 +164,7 @@ public class HFileReaderV2 extends AbstractHFileReader {
|
|||
}
|
||||
|
||||
// Read data block encoding algorithm name from file info.
|
||||
dataBlockEncoder = HFileDataBlockEncoderImpl.createFromFileInfo(fileInfo,
|
||||
preferredEncodingInCache);
|
||||
dataBlockEncoder = HFileDataBlockEncoderImpl.createFromFileInfo(fileInfo);
|
||||
fsBlockReaderV2.setDataBlockEncoder(dataBlockEncoder);
|
||||
|
||||
// Store all other load-on-open blocks for further consumption.
|
||||
|
@ -203,8 +198,7 @@ public class HFileReaderV2 extends AbstractHFileReader {
|
|||
@Override
|
||||
public HFileScanner getScanner(boolean cacheBlocks, final boolean pread,
|
||||
final boolean isCompaction) {
|
||||
// check if we want to use data block encoding in memory
|
||||
if (dataBlockEncoder.useEncodedScanner(isCompaction)) {
|
||||
if (dataBlockEncoder.useEncodedScanner()) {
|
||||
return new EncodedScannerV2(this, cacheBlocks, pread, isCompaction,
|
||||
hfileContext);
|
||||
}
|
||||
|
@ -310,7 +304,7 @@ public class HFileReaderV2 extends AbstractHFileReader {
|
|||
|
||||
BlockCacheKey cacheKey =
|
||||
new BlockCacheKey(name, dataBlockOffset,
|
||||
dataBlockEncoder.getEffectiveEncodingInCache(isCompaction),
|
||||
dataBlockEncoder.getDataBlockEncoding(),
|
||||
expectedBlockType);
|
||||
|
||||
boolean useLock = false;
|
||||
|
@ -329,19 +323,17 @@ public class HFileReaderV2 extends AbstractHFileReader {
|
|||
HFileBlock cachedBlock = (HFileBlock) cacheConf.getBlockCache().getBlock(cacheKey,
|
||||
cacheBlock, useLock);
|
||||
if (cachedBlock != null) {
|
||||
if (cachedBlock.getBlockType() == BlockType.DATA) {
|
||||
HFile.dataBlockReadCnt.incrementAndGet();
|
||||
}
|
||||
|
||||
validateBlockType(cachedBlock, expectedBlockType);
|
||||
if (cachedBlock.getBlockType().isData()) {
|
||||
HFile.dataBlockReadCnt.incrementAndGet();
|
||||
|
||||
// Validate encoding type for encoded blocks. We include encoding
|
||||
// type in the cache key, and we expect it to match on a cache hit.
|
||||
if (cachedBlock.getBlockType() == BlockType.ENCODED_DATA
|
||||
&& cachedBlock.getDataBlockEncoding() != dataBlockEncoder.getEncodingInCache()) {
|
||||
throw new IOException("Cached block under key " + cacheKey + " "
|
||||
// Validate encoding type for data blocks. We include encoding
|
||||
// type in the cache key, and we expect it to match on a cache hit.
|
||||
if (cachedBlock.getDataBlockEncoding() != dataBlockEncoder.getDataBlockEncoding()) {
|
||||
throw new IOException("Cached block under key " + cacheKey + " "
|
||||
+ "has wrong encoding: " + cachedBlock.getDataBlockEncoding() + " (expected: "
|
||||
+ dataBlockEncoder.getEncodingInCache() + ")");
|
||||
+ dataBlockEncoder.getDataBlockEncoding() + ")");
|
||||
}
|
||||
}
|
||||
return cachedBlock;
|
||||
}
|
||||
|
@ -359,7 +351,6 @@ public class HFileReaderV2 extends AbstractHFileReader {
|
|||
long startTimeNs = System.nanoTime();
|
||||
HFileBlock hfileBlock = fsBlockReader.readBlockData(dataBlockOffset, onDiskBlockSize, -1,
|
||||
pread);
|
||||
hfileBlock = diskToCacheFormat(hfileBlock, isCompaction);
|
||||
validateBlockType(hfileBlock, expectedBlockType);
|
||||
|
||||
final long delta = System.nanoTime() - startTimeNs;
|
||||
|
@ -370,7 +361,7 @@ public class HFileReaderV2 extends AbstractHFileReader {
|
|||
cacheConf.getBlockCache().cacheBlock(cacheKey, hfileBlock, cacheConf.isInMemory());
|
||||
}
|
||||
|
||||
if (hfileBlock.getBlockType() == BlockType.DATA) {
|
||||
if (hfileBlock.getBlockType().isData()) {
|
||||
HFile.dataBlockReadCnt.incrementAndGet();
|
||||
}
|
||||
|
||||
|
@ -384,10 +375,6 @@ public class HFileReaderV2 extends AbstractHFileReader {
|
|||
}
|
||||
}
|
||||
|
||||
protected HFileBlock diskToCacheFormat( HFileBlock hfileBlock, final boolean isCompaction) {
|
||||
return dataBlockEncoder.diskToCacheFormat(hfileBlock, isCompaction);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean hasMVCCInfo() {
|
||||
return includesMemstoreTS && decodeMemstoreTS;
|
||||
|
@ -616,8 +603,7 @@ public class HFileReaderV2 extends AbstractHFileReader {
|
|||
+ curBlock.getOnDiskSizeWithHeader(),
|
||||
curBlock.getNextBlockOnDiskSizeWithHeader(), cacheBlocks, pread,
|
||||
isCompaction, null);
|
||||
} while (!(curBlock.getBlockType().equals(BlockType.DATA) ||
|
||||
curBlock.getBlockType().equals(BlockType.ENCODED_DATA)));
|
||||
} while (!curBlock.getBlockType().isData());
|
||||
|
||||
return curBlock;
|
||||
}
|
||||
|
@ -981,23 +967,22 @@ public class HFileReaderV2 extends AbstractHFileReader {
|
|||
* ScannerV2 that operates on encoded data blocks.
|
||||
*/
|
||||
protected static class EncodedScannerV2 extends AbstractScannerV2 {
|
||||
private DataBlockEncoder.EncodedSeeker seeker = null;
|
||||
protected DataBlockEncoder dataBlockEncoder = null;
|
||||
private final HFileBlockDecodingContext decodingCtx;
|
||||
private final DataBlockEncoder.EncodedSeeker seeker;
|
||||
private final DataBlockEncoder dataBlockEncoder;
|
||||
protected final HFileContext meta;
|
||||
protected HFileBlockDecodingContext decodingCtx;
|
||||
|
||||
public EncodedScannerV2(HFileReaderV2 reader, boolean cacheBlocks,
|
||||
boolean pread, boolean isCompaction, HFileContext meta) {
|
||||
super(reader, cacheBlocks, pread, isCompaction);
|
||||
DataBlockEncoding encoding = reader.dataBlockEncoder.getDataBlockEncoding();
|
||||
dataBlockEncoder = encoding.getEncoder();
|
||||
decodingCtx = dataBlockEncoder.newDataBlockDecodingContext(meta);
|
||||
seeker = dataBlockEncoder.createSeeker(
|
||||
reader.getComparator(), decodingCtx);
|
||||
this.meta = meta;
|
||||
}
|
||||
|
||||
protected void setDataBlockEncoder(DataBlockEncoder dataBlockEncoder) {
|
||||
this.dataBlockEncoder = dataBlockEncoder;
|
||||
decodingCtx = this.dataBlockEncoder.newDataBlockDecodingContext(
|
||||
this.meta);
|
||||
seeker = dataBlockEncoder.createSeeker(reader.getComparator(), decodingCtx);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isSeeked(){
|
||||
return this.block != null;
|
||||
|
@ -1008,8 +993,9 @@ public class HFileReaderV2 extends AbstractHFileReader {
|
|||
* the the first key/value pair.
|
||||
*
|
||||
* @param newBlock the block to make current
|
||||
* @throws CorruptHFileException
|
||||
*/
|
||||
protected void updateCurrentBlock(HFileBlock newBlock) {
|
||||
private void updateCurrentBlock(HFileBlock newBlock) throws CorruptHFileException {
|
||||
block = newBlock;
|
||||
|
||||
// sanity checks
|
||||
|
@ -1017,8 +1003,14 @@ public class HFileReaderV2 extends AbstractHFileReader {
|
|||
throw new IllegalStateException(
|
||||
"EncodedScanner works only on encoded data blocks");
|
||||
}
|
||||
short dataBlockEncoderId = block.getDataBlockEncodingId();
|
||||
if (!DataBlockEncoding.isCorrectEncoder(dataBlockEncoder, dataBlockEncoderId)) {
|
||||
String encoderCls = dataBlockEncoder.getClass().getName();
|
||||
throw new CorruptHFileException("Encoder " + encoderCls
|
||||
+ " doesn't support data block encoding "
|
||||
+ DataBlockEncoding.getNameFromId(dataBlockEncoderId));
|
||||
}
|
||||
|
||||
updateDataBlockEncoder(block);
|
||||
seeker.setCurrentBuffer(getEncodedBuffer(newBlock));
|
||||
blockFetches++;
|
||||
|
||||
|
@ -1026,15 +1018,6 @@ public class HFileReaderV2 extends AbstractHFileReader {
|
|||
this.nextIndexedKey = null;
|
||||
}
|
||||
|
||||
private void updateDataBlockEncoder(HFileBlock curBlock) {
|
||||
short dataBlockEncoderId = curBlock.getDataBlockEncodingId();
|
||||
if (dataBlockEncoder == null ||
|
||||
!DataBlockEncoding.isCorrectEncoder(dataBlockEncoder, dataBlockEncoderId)) {
|
||||
DataBlockEncoder encoder = DataBlockEncoding.getDataBlockEncoderById(dataBlockEncoderId);
|
||||
setDataBlockEncoder(encoder);
|
||||
}
|
||||
}
|
||||
|
||||
private ByteBuffer getEncodedBuffer(HFileBlock newBlock) {
|
||||
ByteBuffer origBlock = newBlock.getBufferReadOnly();
|
||||
ByteBuffer encodedBlock = ByteBuffer.wrap(origBlock.array(),
|
||||
|
@ -1132,7 +1115,6 @@ public class HFileReaderV2 extends AbstractHFileReader {
|
|||
|
||||
@Override
|
||||
protected ByteBuffer getFirstKeyInBlock(HFileBlock curBlock) {
|
||||
updateDataBlockEncoder(curBlock);
|
||||
return dataBlockEncoder.getFirstKeyInBlock(getEncodedBuffer(curBlock));
|
||||
}
|
||||
|
||||
|
|
|
@ -24,7 +24,6 @@ import org.apache.hadoop.fs.Path;
|
|||
import org.apache.hadoop.hbase.HConstants;
|
||||
import org.apache.hadoop.hbase.fs.HFileSystem;
|
||||
import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
|
||||
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
|
||||
import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo;
|
||||
import org.apache.hadoop.hbase.util.ByteBufferUtils;
|
||||
import org.apache.hadoop.hbase.util.Bytes;
|
||||
|
@ -51,15 +50,11 @@ public class HFileReaderV3 extends HFileReaderV2 {
|
|||
* Length of the stream.
|
||||
* @param cacheConf
|
||||
* Cache configuration.
|
||||
* @param preferredEncodingInCache
|
||||
* the encoding to use in cache in case we have a choice. If the file
|
||||
* is already encoded on disk, we will still use its on-disk encoding
|
||||
* in cache.
|
||||
*/
|
||||
public HFileReaderV3(Path path, FixedFileTrailer trailer, final FSDataInputStreamWrapper fsdis,
|
||||
final long size, final CacheConfig cacheConf, DataBlockEncoding preferredEncodingInCache,
|
||||
final long size, final CacheConfig cacheConf,
|
||||
final HFileSystem hfs) throws IOException {
|
||||
super(path, trailer, fsdis, size, cacheConf, preferredEncodingInCache, hfs);
|
||||
super(path, trailer, fsdis, size, cacheConf, hfs);
|
||||
byte[] tmp = fileInfo.get(FileInfo.MAX_TAGS_LEN);
|
||||
// max tag length is not present in the HFile means tags were not at all written to file.
|
||||
if (tmp != null) {
|
||||
|
@ -98,8 +93,7 @@ public class HFileReaderV3 extends HFileReaderV2 {
|
|||
@Override
|
||||
public HFileScanner getScanner(boolean cacheBlocks, final boolean pread,
|
||||
final boolean isCompaction) {
|
||||
// check if we want to use data block encoding in memory
|
||||
if (dataBlockEncoder.useEncodedScanner(isCompaction)) {
|
||||
if (dataBlockEncoder.useEncodedScanner()) {
|
||||
return new EncodedScannerV3(this, cacheBlocks, pread, isCompaction, this.hfileContext);
|
||||
}
|
||||
return new ScannerV3(this, cacheBlocks, pread, isCompaction);
|
||||
|
@ -277,9 +271,4 @@ public class HFileReaderV3 extends HFileReaderV2 {
|
|||
public int getMajorVersion() {
|
||||
return 3;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected HFileBlock diskToCacheFormat(HFileBlock hfileBlock, final boolean isCompaction) {
|
||||
return dataBlockEncoder.diskToCacheFormat(hfileBlock, isCompaction);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -194,13 +194,9 @@ public class HFileWriterV2 extends AbstractHFileWriter {
|
|||
* the cache key.
|
||||
*/
|
||||
private void doCacheOnWrite(long offset) {
|
||||
// We don't cache-on-write data blocks on compaction, so assume this is not
|
||||
// a compaction.
|
||||
final boolean isCompaction = false;
|
||||
HFileBlock cacheFormatBlock = blockEncoder.diskToCacheFormat(
|
||||
fsBlockWriter.getBlockForCaching(), isCompaction);
|
||||
HFileBlock cacheFormatBlock = fsBlockWriter.getBlockForCaching();
|
||||
cacheConf.getBlockCache().cacheBlock(
|
||||
new BlockCacheKey(name, offset, blockEncoder.getEncodingInCache(),
|
||||
new BlockCacheKey(name, offset, blockEncoder.getDataBlockEncoding(),
|
||||
cacheFormatBlock.getBlockType()), cacheFormatBlock);
|
||||
}
|
||||
|
||||
|
|
|
@@ -179,8 +179,8 @@ public class HFileWriterV3 extends HFileWriterV2 {
// When tags are not being written in this file, MAX_TAGS_LEN is excluded
// from the FileInfo
fileInfo.append(FileInfo.MAX_TAGS_LEN, Bytes.toBytes(this.maxTagsLength), false);
boolean tagsCompressed = (hFileContext.getEncodingOnDisk() != DataBlockEncoding.NONE)
&& hFileContext.isCompressTags();
boolean tagsCompressed = (hFileContext.getDataBlockEncoding() != DataBlockEncoding.NONE)
&& hFileContext.isCompressTags();
fileInfo.append(FileInfo.TAGS_COMPRESSED, Bytes.toBytes(tagsCompressed), false);
}
}
@@ -20,13 +20,11 @@ import java.io.IOException;
import java.nio.ByteBuffer;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.encoding.HFileBlockDecodingContext;
import org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultDecodingContext;
import org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext;
import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext;
import org.apache.hadoop.hbase.io.encoding.HFileBlockDecodingContext;
import org.apache.hadoop.hbase.io.hfile.HFileContext;

/**
* Does not perform any kind of encoding/decoding.
@@ -41,14 +39,6 @@ public class NoOpDataBlockEncoder implements HFileDataBlockEncoder {
private NoOpDataBlockEncoder() {
}

@Override
public HFileBlock diskToCacheFormat(HFileBlock block, boolean isCompaction) {
if (block.getBlockType() == BlockType.ENCODED_DATA) {
throw new IllegalStateException("Unexpected encoded block");
}
return block;
}

@Override
public void beforeWriteToDisk(ByteBuffer in,
HFileBlockEncodingContext encodeCtx, BlockType blockType)
@@ -65,7 +55,7 @@ public class NoOpDataBlockEncoder implements HFileDataBlockEncoder {
}

@Override
public boolean useEncodedScanner(boolean isCompaction) {
public boolean useEncodedScanner() {
return false;
}

@@ -74,17 +64,7 @@ public class NoOpDataBlockEncoder implements HFileDataBlockEncoder {
}

@Override
public DataBlockEncoding getEncodingOnDisk() {
return DataBlockEncoding.NONE;
}

@Override
public DataBlockEncoding getEncodingInCache() {
return DataBlockEncoding.NONE;
}

@Override
public DataBlockEncoding getEffectiveEncodingInCache(boolean isCompaction) {
public DataBlockEncoding getDataBlockEncoding() {
return DataBlockEncoding.NONE;
}

@@ -94,14 +74,13 @@ public class NoOpDataBlockEncoder implements HFileDataBlockEncoder {
}

@Override
public HFileBlockEncodingContext newOnDiskDataBlockEncodingContext(
public HFileBlockEncodingContext newDataBlockEncodingContext(
byte[] dummyHeader, HFileContext meta) {
return new HFileBlockDefaultEncodingContext(null, dummyHeader, meta);
}

@Override
public HFileBlockDecodingContext newOnDiskDataBlockDecodingContext(HFileContext meta) {
public HFileBlockDecodingContext newDataBlockDecodingContext(HFileContext meta) {
return new HFileBlockDefaultDecodingContext(meta);
}

}
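
With diskToCacheFormat and the separate on-disk/in-cache getters gone, HFileDataBlockEncoder exposes a single encoding. A small illustrative check a caller might perform against either NoOpDataBlockEncoder.INSTANCE or an HFileDataBlockEncoderImpl (hypothetical helper, not code from this patch):

    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;

    public class EncoderSketch {
      // True when the encoder produces ENCODED_DATA blocks, both on disk and in the block cache.
      static boolean writesEncodedBlocks(HFileDataBlockEncoder encoder) {
        return encoder.getDataBlockEncoding() != DataBlockEncoding.NONE;
      }
    }
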
@@ -198,8 +198,7 @@ public class HFileOutputFormat extends FileOutputFormat<ImmutableBytesWritable,
.withBytesPerCheckSum(HStore.getBytesPerChecksum(conf))
.withBlockSize(blockSize);
if(dataBlockEncodingStr != null) {
contextBuilder.withDataBlockEncodingOnDisk(DataBlockEncoding.valueOf(dataBlockEncodingStr))
.withDataBlockEncodingInCache(DataBlockEncoding.valueOf(dataBlockEncodingStr));
contextBuilder.withDataBlockEncoding(DataBlockEncoding.valueOf(dataBlockEncodingStr));
}
HFileContext hFileContext = contextBuilder.build();
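
HFileContextBuilder now takes a single withDataBlockEncoding call instead of separate on-disk and in-cache settings. A minimal sketch of building a context this way (the FAST_DIFF choice and the 64 KB block size are illustrative values only):

    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.io.hfile.HFileContext;
    import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;

    public class ContextSketch {
      static HFileContext exampleContext() {
        return new HFileContextBuilder()
            .withBlockSize(64 * 1024)                            // illustrative block size
            .withDataBlockEncoding(DataBlockEncoding.FAST_DIFF)  // single encoding knob
            .build();
      }
    }
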
@@ -190,6 +190,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
* @param table the table to load into
* @throws TableNotFoundException if table does not yet exist
*/
@SuppressWarnings("deprecation")
public void doBulkLoad(Path hfofDir, final HTable table)
throws TableNotFoundException, IOException
{
@@ -650,8 +651,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
HalfStoreFileReader halfReader = null;
StoreFile.Writer halfWriter = null;
try {
halfReader = new HalfStoreFileReader(fs, inFile, cacheConf,
reference, DataBlockEncoding.NONE);
halfReader = new HalfStoreFileReader(fs, inFile, cacheConf, reference);
Map<byte[], byte[]> fileInfo = halfReader.loadFileInfo();

int blocksize = familyDescriptor.getBlocksize();
@@ -662,8 +662,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
.withChecksumType(HStore.getChecksumType(conf))
.withBytesPerCheckSum(HStore.getBytesPerChecksum(conf))
.withBlockSize(blocksize)
.withDataBlockEncodingInCache(familyDescriptor.getDataBlockEncoding())
.withDataBlockEncodingOnDisk(familyDescriptor.getDataBlockEncodingOnDisk())
.withDataBlockEncoding(familyDescriptor.getDataBlockEncoding())
.build();
halfWriter = new StoreFile.WriterBuilder(conf, cacheConf,
fs)
@@ -64,7 +64,6 @@ import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl;
import org.apache.hadoop.hbase.io.hfile.HFileScanner;
import org.apache.hadoop.hbase.io.hfile.InvalidHFileException;
import org.apache.hadoop.hbase.io.hfile.NoOpDataBlockEncoder;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor;
@@ -204,8 +203,7 @@ public class HStore implements Store {
this.blocksize = family.getBlocksize();

this.dataBlockEncoder =
new HFileDataBlockEncoderImpl(family.getDataBlockEncodingOnDisk(),
family.getDataBlockEncoding());
new HFileDataBlockEncoderImpl(family.getDataBlockEncoding());

this.comparator = info.getComparator();
// used by ScanQueryMatcher
@@ -473,14 +471,10 @@ public class HStore implements Store {
}

private StoreFile createStoreFileAndReader(final Path p) throws IOException {
return createStoreFileAndReader(p, this.dataBlockEncoder);
}

private StoreFile createStoreFileAndReader(final Path p, final HFileDataBlockEncoder encoder) throws IOException {
StoreFileInfo info = new StoreFileInfo(conf, this.getFileSystem(), p);
info.setRegionCoprocessorHost(this.region.getCoprocessorHost());
StoreFile storeFile = new StoreFile(this.getFileSystem(), info, this.conf, this.cacheConf,
this.family.getBloomFilterType(), encoder);
this.family.getBloomFilterType());
storeFile.createReader();
return storeFile;
}
@@ -833,8 +827,7 @@ public class HStore implements Store {
.withBytesPerCheckSum(bytesPerChecksum)
.withBlockSize(blocksize)
.withHBaseCheckSum(true)
.withDataBlockEncodingOnDisk(family.getDataBlockEncodingOnDisk())
.withDataBlockEncodingInCache(family.getDataBlockEncoding())
.withDataBlockEncoding(family.getDataBlockEncoding())
.build();
return hFileContext;
}
@@ -1386,7 +1379,7 @@ public class HStore implements Store {
throws IOException {
StoreFile storeFile = null;
try {
storeFile = createStoreFileAndReader(path, NoOpDataBlockEncoder.INSTANCE);
storeFile = createStoreFileAndReader(path);
} catch (IOException e) {
LOG.error("Failed to open store file : " + path
+ ", keeping it in tmp location", e);
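
After this change the store derives its encoder from the single column-family setting. A one-method sketch, assuming an HColumnDescriptor is passed in (the class and method names are illustrative):

    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
    import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl;

    public class StoreEncoderSketch {
      // The store-level encoder now mirrors the CF's single DATA_BLOCK_ENCODING value.
      static HFileDataBlockEncoder encoderFor(HColumnDescriptor family) {
        return new HFileDataBlockEncoderImpl(family.getDataBlockEncoding());
      }
    }
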
@@ -61,7 +61,6 @@ import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.io.Reference;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
@@ -1648,7 +1647,6 @@ public class RegionCoprocessorHost
* @param in {@link FSDataInputStreamWrapper}
* @param size Full size of the file
* @param cacheConf
* @param preferredEncodingInCache
* @param r original reference file. This will be not null only when reading a split file.
* @return a Reader instance to use instead of the base reader if overriding
* default behavior, null otherwise
@@ -1656,7 +1654,7 @@ public class RegionCoprocessorHost
*/
public StoreFile.Reader preStoreFileReaderOpen(final FileSystem fs, final Path p,
final FSDataInputStreamWrapper in, long size, final CacheConfig cacheConf,
final DataBlockEncoding preferredEncodingInCache, final Reference r) throws IOException {
final Reference r) throws IOException {
StoreFile.Reader reader = null;
ObserverContext<RegionCoprocessorEnvironment> ctx = null;
for (RegionEnvironment env : coprocessors) {
@@ -1664,7 +1662,7 @@ public class RegionCoprocessorHost
ctx = ObserverContext.createAndPrepare(env, ctx);
try {
reader = ((RegionObserver) env.getInstance()).preStoreFileReaderOpen(ctx, fs, p, in,
size, cacheConf, preferredEncodingInCache, r, reader);
size, cacheConf, r, reader);
} catch (Throwable e) {
handleCoprocessorThrowable(env, e);
}
@@ -1682,7 +1680,6 @@ public class RegionCoprocessorHost
* @param in {@link FSDataInputStreamWrapper}
* @param size Full size of the file
* @param cacheConf
* @param preferredEncodingInCache
* @param r original reference file. This will be not null only when reading a split file.
* @param reader the base reader instance
* @return The reader to use
@@ -1690,15 +1687,14 @@ public class RegionCoprocessorHost
*/
public StoreFile.Reader postStoreFileReaderOpen(final FileSystem fs, final Path p,
final FSDataInputStreamWrapper in, long size, final CacheConfig cacheConf,
final DataBlockEncoding preferredEncodingInCache, final Reference r, StoreFile.Reader reader)
throws IOException {
final Reference r, StoreFile.Reader reader) throws IOException {
ObserverContext<RegionCoprocessorEnvironment> ctx = null;
for (RegionEnvironment env : coprocessors) {
if (env.getInstance() instanceof RegionObserver) {
ctx = ObserverContext.createAndPrepare(env, ctx);
try {
reader = ((RegionObserver) env.getInstance()).postStoreFileReaderOpen(ctx, fs, p, in,
size, cacheConf, preferredEncodingInCache, r, reader);
size, cacheConf, r, reader);
} catch (Throwable e) {
handleCoprocessorThrowable(env, e);
}
@@ -48,10 +48,8 @@ import org.apache.hadoop.hbase.io.hfile.BlockType;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileContext;
import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
import org.apache.hadoop.hbase.io.hfile.HFileScanner;
import org.apache.hadoop.hbase.io.hfile.HFileWriterV2;
import org.apache.hadoop.hbase.io.hfile.NoOpDataBlockEncoder;
import org.apache.hadoop.hbase.regionserver.compactions.Compactor;
import org.apache.hadoop.hbase.util.BloomFilter;
import org.apache.hadoop.hbase.util.BloomFilterFactory;
@@ -123,9 +121,6 @@ public class StoreFile {
// Block cache configuration and reference.
private final CacheConfig cacheConf;

// What kind of data block encoding will be used
private final HFileDataBlockEncoder dataBlockEncoder;

// Keys for metadata stored in backing HFile.
// Set when we obtain a Reader.
private long sequenceid = -1;
@@ -186,13 +181,11 @@ public class StoreFile {
* as the Bloom filter type actually present in the HFile, because
* column family configuration might change. If this is
* {@link BloomType#NONE}, the existing Bloom filter is ignored.
* @param dataBlockEncoder data block encoding algorithm.
* @throws IOException When opening the reader fails.
*/
public StoreFile(final FileSystem fs, final Path p, final Configuration conf,
final CacheConfig cacheConf, final BloomType cfBloomType,
final HFileDataBlockEncoder dataBlockEncoder) throws IOException {
this(fs, new StoreFileInfo(conf, fs, p), conf, cacheConf, cfBloomType, dataBlockEncoder);
final CacheConfig cacheConf, final BloomType cfBloomType) throws IOException {
this(fs, new StoreFileInfo(conf, fs, p), conf, cacheConf, cfBloomType);
}

@@ -209,18 +202,13 @@ public class StoreFile {
* as the Bloom filter type actually present in the HFile, because
* column family configuration might change. If this is
* {@link BloomType#NONE}, the existing Bloom filter is ignored.
* @param dataBlockEncoder data block encoding algorithm.
* @throws IOException When opening the reader fails.
*/
public StoreFile(final FileSystem fs, final StoreFileInfo fileInfo, final Configuration conf,
final CacheConfig cacheConf, final BloomType cfBloomType,
final HFileDataBlockEncoder dataBlockEncoder) throws IOException {
final CacheConfig cacheConf, final BloomType cfBloomType) throws IOException {
this.fs = fs;
this.fileInfo = fileInfo;
this.cacheConf = cacheConf;
this.dataBlockEncoder =
dataBlockEncoder == null ? NoOpDataBlockEncoder.INSTANCE
: dataBlockEncoder;

if (BloomFilterFactory.isGeneralBloomEnabled(conf)) {
this.cfBloomType = cfBloomType;
@@ -363,7 +351,7 @@ public class StoreFile {
}

// Open the StoreFile.Reader
this.reader = fileInfo.open(this.fs, this.cacheConf, dataBlockEncoder.getEncodingInCache());
this.reader = fileInfo.open(this.fs, this.cacheConf);

// Load up indices and fileinfo. This also loads Bloom filter type.
metadataMap = Collections.unmodifiableMap(this.reader.loadFileInfo());
@@ -659,6 +647,7 @@ public class StoreFile {
* @param comparator Comparator used to compare KVs.
* @return The split point row, or null if splitting is not possible, or reader is null.
*/
@SuppressWarnings("deprecation")
byte[] getFileSplitPoint(KVComparator comparator) throws IOException {
if (this.reader == null) {
LOG.warn("Storefile " + this + " Reader is null; cannot get split point");
@@ -1023,17 +1012,14 @@ public class StoreFile {
private byte[] lastBloomKey;
private long deleteFamilyCnt = -1;

public Reader(FileSystem fs, Path path, CacheConfig cacheConf,
DataBlockEncoding preferredEncodingInCache) throws IOException {
reader = HFile.createReaderWithEncoding(fs, path, cacheConf,
preferredEncodingInCache);
public Reader(FileSystem fs, Path path, CacheConfig cacheConf) throws IOException {
reader = HFile.createReader(fs, path, cacheConf);
bloomFilterType = BloomType.NONE;
}

public Reader(FileSystem fs, Path path, FSDataInputStreamWrapper in, long size,
CacheConfig cacheConf, DataBlockEncoding preferredEncodingInCache) throws IOException {
reader = HFile.createReaderWithEncoding(
fs, path, in, size, cacheConf, preferredEncodingInCache);
CacheConfig cacheConf) throws IOException {
reader = HFile.createReader(fs, path, in, size, cacheConf);
bloomFilterType = BloomType.NONE;
}
@@ -168,11 +168,10 @@ public class StoreFileInfo {
* Open a Reader for the StoreFile
* @param fs The current file system to use.
* @param cacheConf The cache configuration and block cache reference.
* @param dataBlockEncoding data block encoding algorithm.
* @return The StoreFile.Reader for the file
*/
public StoreFile.Reader open(final FileSystem fs, final CacheConfig cacheConf,
final DataBlockEncoding dataBlockEncoding) throws IOException {
public StoreFile.Reader open(final FileSystem fs,
final CacheConfig cacheConf) throws IOException {
FSDataInputStreamWrapper in;
FileStatus status;

@@ -198,19 +197,18 @@ public class StoreFileInfo {
StoreFile.Reader reader = null;
if (this.coprocessorHost != null) {
reader = this.coprocessorHost.preStoreFileReaderOpen(fs, this.getPath(), in, length,
cacheConf, dataBlockEncoding, reference);
cacheConf, reference);
}
if (reader == null) {
if (this.reference != null) {
reader = new HalfStoreFileReader(fs, this.getPath(), in, length, cacheConf, reference,
dataBlockEncoding);
reader = new HalfStoreFileReader(fs, this.getPath(), in, length, cacheConf, reference);
} else {
reader = new StoreFile.Reader(fs, this.getPath(), in, length, cacheConf, dataBlockEncoding);
reader = new StoreFile.Reader(fs, this.getPath(), in, length, cacheConf);
}
}
if (this.coprocessorHost != null) {
reader = this.coprocessorHost.postStoreFileReaderOpen(fs, this.getPath(), in, length,
cacheConf, dataBlockEncoding, reference, reader);
cacheConf, reference, reader);
}
return reader;
}
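
The net effect on StoreFile and StoreFileInfo is that opening a reader needs no encoding argument at all. A minimal sketch of the open path, assuming fs, path, conf and cacheConf are already available (class and method names are illustrative):

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.io.hfile.CacheConfig;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.regionserver.StoreFile;

    public class OpenStoreFileSketch {
      static StoreFile.Reader openReader(FileSystem fs, Path path, Configuration conf,
          CacheConfig cacheConf) throws IOException {
        // No HFileDataBlockEncoder parameter; the encoding is read back from the HFile.
        StoreFile sf = new StoreFile(fs, path, conf, cacheConf, BloomType.NONE);
        return sf.createReader();
      }
    }
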
@@ -27,7 +27,6 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
@@ -38,7 +37,6 @@ import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo;
import org.apache.hadoop.hbase.io.hfile.HFileWriterV2;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.MultiVersionConsistencyControl;
import org.apache.hadoop.hbase.regionserver.ScanType;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.regionserver.StoreFile;
@@ -148,7 +146,7 @@ public abstract class Compactor {
", keycount=" + keyCount +
", bloomtype=" + r.getBloomFilterType().toString() +
", size=" + StringUtils.humanReadableInt(r.length()) +
", encoding=" + r.getHFileReader().getEncodingOnDisk() +
", encoding=" + r.getHFileReader().getDataBlockEncoding() +
", seqNum=" + seqNum +
(calculatePutTs ? ", earliestPutTs=" + earliestPutTs: ""));
}
@@ -199,7 +197,6 @@ public abstract class Compactor {
return store.getCoprocessorHost().preCompact(store, scanner, scanType, request);
}

@SuppressWarnings("deprecation")
/**
* Performs the compaction.
* @param scanner Where to read from.
@@ -31,7 +31,6 @@ import java.util.NavigableSet;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;

import com.google.common.collect.ImmutableList;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileSystem;
@@ -41,17 +40,16 @@ import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
import org.apache.hadoop.hbase.io.Reference;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
@@ -68,6 +66,8 @@ import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;

import com.google.common.collect.ImmutableList;

/**
* A sample region observer that tests the RegionObserver interface.
* It works with TestRegionObserverInterface to provide the test case.
@@ -561,7 +561,7 @@ public class SimpleRegionObserver extends BaseRegionObserver {
@Override
public Reader preStoreFileReaderOpen(ObserverContext<RegionCoprocessorEnvironment> ctx,
FileSystem fs, Path p, FSDataInputStreamWrapper in, long size, CacheConfig cacheConf,
DataBlockEncoding preferredEncodingInCache, Reference r, Reader reader) throws IOException {
Reference r, Reader reader) throws IOException {
ctPreStoreFileReaderOpen.incrementAndGet();
return null;
}
@@ -569,7 +569,7 @@ public class SimpleRegionObserver extends BaseRegionObserver {
@Override
public Reader postStoreFileReaderOpen(ObserverContext<RegionCoprocessorEnvironment> ctx,
FileSystem fs, Path p, FSDataInputStreamWrapper in, long size, CacheConfig cacheConf,
DataBlockEncoding preferredEncodingInCache, Reference r, Reader reader) throws IOException {
Reference r, Reader reader) throws IOException {
ctPostStoreFileReaderOpen.incrementAndGet();
return reader;
}
@@ -117,7 +117,7 @@ public class TestHalfStoreFileReader {
CacheConfig cacheConf)
throws IOException {
final HalfStoreFileReader halfreader = new HalfStoreFileReader(fs, p,
cacheConf, bottom, DataBlockEncoding.NONE);
cacheConf, bottom);
halfreader.loadFileInfo();
final HFileScanner scanner = halfreader.getScanner(false, false);

@@ -218,7 +218,7 @@ public class TestHalfStoreFileReader {
CacheConfig cacheConfig)
throws IOException {
final HalfStoreFileReader halfreader = new HalfStoreFileReader(fs, p,
cacheConfig, bottom, DataBlockEncoding.NONE);
cacheConfig, bottom);
halfreader.loadFileInfo();
final HFileScanner scanner = halfreader.getScanner(false, false);
scanner.seekBefore(seekBefore.getKey());
@@ -45,6 +45,7 @@ import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.zookeeper.ZKAssign;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
@@ -101,6 +102,7 @@ public class TestChangingEncoding {
conf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024 * 1024);
// ((Log4JLogger)RpcServerImplementation.LOG).getLogger().setLevel(Level.TRACE);
// ((Log4JLogger)RpcClient.LOG).getLogger().setLevel(Level.TRACE);
conf.setBoolean("hbase.online.schema.update.enable", true);
TEST_UTIL.startMiniCluster();
}

@@ -175,23 +177,30 @@ public class TestChangingEncoding {
}

private void setEncodingConf(DataBlockEncoding encoding,
boolean encodeOnDisk) throws IOException {
boolean onlineChange) throws Exception {
LOG.debug("Setting CF encoding to " + encoding + " (ordinal="
+ encoding.ordinal() + "), encodeOnDisk=" + encodeOnDisk);
admin.disableTable(tableName);
+ encoding.ordinal() + "), onlineChange=" + onlineChange);
hcd.setDataBlockEncoding(encoding);
hcd.setEncodeOnDisk(encodeOnDisk);
if (!onlineChange) {
admin.disableTable(tableName);
}
admin.modifyColumn(tableName, hcd);
admin.enableTable(tableName);
if (!onlineChange) {
admin.enableTable(tableName);
}
// This is a unit test, not integration test. So let's
// wait for regions out of transition. Otherwise, for online
// encoding change, verification phase may be flaky because
// regions could be still in transition.
ZKAssign.blockUntilNoRIT(TEST_UTIL.getZooKeeperWatcher());
}

@Test(timeout=TIMEOUT_MS)
public void testChangingEncoding() throws Exception {
prepareTest("ChangingEncoding");
for (boolean encodeOnDisk : new boolean[]{false, true}) {
for (boolean onlineChange : new boolean[]{false, true}) {
for (DataBlockEncoding encoding : ENCODINGS_TO_ITERATE) {
LOG.info("encoding=" + encoding + ", encodeOnDisk=" + encodeOnDisk);
setEncodingConf(encoding, encodeOnDisk);
setEncodingConf(encoding, onlineChange);
writeSomeNewData();
verifyAllData();
}
@@ -201,35 +210,9 @@ public class TestChangingEncoding {
@Test(timeout=TIMEOUT_MS)
public void testChangingEncodingWithCompaction() throws Exception {
prepareTest("ChangingEncodingWithCompaction");
for (boolean encodeOnDisk : new boolean[]{false, true}) {
for (boolean onlineChange : new boolean[]{false, true}) {
for (DataBlockEncoding encoding : ENCODINGS_TO_ITERATE) {
setEncodingConf(encoding, encodeOnDisk);
writeSomeNewData();
verifyAllData();
compactAndWait();
verifyAllData();
}
}
}

@Test(timeout=TIMEOUT_MS)
public void testFlippingEncodeOnDisk() throws Exception {
prepareTest("FlippingEncodeOnDisk");
// The focus of this test case is to flip the "encoding on disk" flag,
// so we only try a couple of encodings.
DataBlockEncoding[] encodings = new DataBlockEncoding[] {
DataBlockEncoding.NONE, DataBlockEncoding.FAST_DIFF };
for (DataBlockEncoding encoding : encodings) {
boolean[] flagValues;
if (encoding == DataBlockEncoding.NONE) {
// encodeOnDisk does not matter when not using encoding.
flagValues =
new boolean[] { HColumnDescriptor.DEFAULT_ENCODE_ON_DISK };
} else {
flagValues = new boolean[] { false, true, false, true };
}
for (boolean encodeOnDisk : flagValues) {
setEncodingConf(encoding, encodeOnDisk);
setEncodingConf(encoding, onlineChange);
writeSomeNewData();
verifyAllData();
compactAndWait();
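
The onlineChange=true path in setEncodingConf exercises changing a column family's encoding while the table stays enabled. A condensed sketch of that path, assuming admin, tableName, hcd and TEST_UTIL are set up as in the test class and hbase.online.schema.update.enable is true:

    // Condensed sketch of the online path (field names as in the test class).
    private void changeEncodingOnline(DataBlockEncoding encoding) throws Exception {
      hcd.setDataBlockEncoding(encoding);
      admin.modifyColumn(tableName, hcd);   // table stays enabled
      // Wait until no regions are in transition before verifying data.
      ZKAssign.blockUntilNoRIT(TEST_UTIL.getZooKeeperWatcher());
    }
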
@@ -112,7 +112,6 @@ public class TestEncodedSeekers {
// Need to disable default row bloom filter for this test to pass.
HColumnDescriptor hcd = (new HColumnDescriptor(CF_NAME)).setMaxVersions(MAX_VERSIONS).
setDataBlockEncoding(encoding).
setEncodeOnDisk(encodeOnDisk).
setBlocksize(BLOCK_SIZE).
setBloomFilterType(BloomType.NONE).
setCompressTags(compressTags);
@@ -78,7 +78,6 @@ public class TestLoadAndSwitchEncodeOnDisk extends
assertAllOnLine(t);

admin.disableTable(TABLE);
hcd.setEncodeOnDisk(false);
admin.modifyColumn(TABLE, hcd);

System.err.println("\nRe-enabling table\n");
@@ -89,7 +89,6 @@ public class TestCacheOnWrite {
private static final int INDEX_BLOCK_SIZE = 512;
private static final int BLOOM_BLOCK_SIZE = 4096;
private static final BloomType BLOOM_TYPE = BloomType.ROWCOL;
private static final ChecksumType CKTYPE = ChecksumType.CRC32;
private static final int CKBYTES = 512;

/** The number of valid key types possible in a store file */
@@ -136,22 +135,21 @@ public class TestCacheOnWrite {

/** Provides fancy names for three combinations of two booleans */
private static enum BlockEncoderTestType {
NO_BLOCK_ENCODING_NOOP(true, false),
NO_BLOCK_ENCODING(false, false),
BLOCK_ENCODING_IN_CACHE_ONLY(false, true),
BLOCK_ENCODING_EVERYWHERE(true, true);
BLOCK_ENCODING_EVERYWHERE(false, true);

private final boolean encodeOnDisk;
private final boolean encodeInCache;
private final boolean noop;
private final boolean encode;

BlockEncoderTestType(boolean encodeOnDisk, boolean encodeInCache) {
this.encodeOnDisk = encodeOnDisk;
this.encodeInCache = encodeInCache;
BlockEncoderTestType(boolean noop, boolean encode) {
this.encode = encode;
this.noop = noop;
}

public HFileDataBlockEncoder getEncoder() {
return new HFileDataBlockEncoderImpl(
encodeOnDisk ? ENCODING_ALGO : DataBlockEncoding.NONE,
encodeInCache ? ENCODING_ALGO : DataBlockEncoding.NONE);
return noop ? NoOpDataBlockEncoder.INSTANCE : new HFileDataBlockEncoderImpl(
encode ? ENCODING_ALGO : DataBlockEncoding.NONE);
}
}

@@ -221,11 +219,9 @@ public class TestCacheOnWrite {
private void readStoreFile(boolean useTags) throws IOException {
AbstractHFileReader reader;
if (useTags) {
reader = (HFileReaderV3) HFile.createReaderWithEncoding(fs, storeFilePath, cacheConf,
encoder.getEncodingInCache());
reader = (HFileReaderV3) HFile.createReader(fs, storeFilePath, cacheConf);
} else {
reader = (HFileReaderV2) HFile.createReaderWithEncoding(fs, storeFilePath, cacheConf,
encoder.getEncodingInCache());
reader = (HFileReaderV2) HFile.createReader(fs, storeFilePath, cacheConf);
}
LOG.info("HFile information: " + reader);
final boolean cacheBlocks = false;
@@ -239,7 +235,7 @@ public class TestCacheOnWrite {
new EnumMap<BlockType, Integer>(BlockType.class);

DataBlockEncoding encodingInCache =
encoderType.getEncoder().getEncodingInCache();
encoderType.getEncoder().getDataBlockEncoding();
while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) {
long onDiskSize = -1;
if (prevBlock != null) {
@@ -272,7 +268,7 @@ public class TestCacheOnWrite {
LOG.info("Block count by type: " + blockCountByType);
String countByType = blockCountByType.toString();
BlockType cachedDataBlockType =
encoderType.encodeInCache ? BlockType.ENCODED_DATA : BlockType.DATA;
encoderType.encode ? BlockType.ENCODED_DATA : BlockType.DATA;
if (useTags) {
assertEquals("{" + cachedDataBlockType
+ "=1550, LEAF_INDEX=173, BLOOM_CHUNK=9, INTERMEDIATE_INDEX=20}", countByType);
@@ -309,8 +305,7 @@ public class TestCacheOnWrite {
"test_cache_on_write");
HFileContext meta = new HFileContextBuilder().withCompression(compress)
.withBytesPerCheckSum(CKBYTES).withChecksumType(ChecksumType.NULL)
.withBlockSize(DATA_BLOCK_SIZE).withDataBlockEncodingInCache(encoder.getEncodingInCache())
.withDataBlockEncodingOnDisk(encoder.getEncodingOnDisk())
.withBlockSize(DATA_BLOCK_SIZE).withDataBlockEncoding(encoder.getDataBlockEncoding())
.withIncludesTags(useTags).build();
StoreFile.Writer sfw = new StoreFile.WriterBuilder(conf, cacheConf, fs)
.withOutputDir(storeFileParentDir).withComparator(KeyValue.COMPARATOR)
@@ -376,9 +371,7 @@ public class TestCacheOnWrite {
.setCompressionType(compress)
.setBloomFilterType(BLOOM_TYPE)
.setMaxVersions(maxVersions)
.setDataBlockEncoding(encoder.getEncodingInCache())
.setEncodeOnDisk(encoder.getEncodingOnDisk() !=
DataBlockEncoding.NONE)
.setDataBlockEncoding(encoder.getDataBlockEncoding())
);
int rowIdx = 0;
long ts = EnvironmentEdgeManager.currentTimeMillis();
@@ -416,6 +409,7 @@ public class TestCacheOnWrite {
Map<BlockType, Integer> blockTypesInCache =
blockCache.getBlockTypeCountsForTest();
LOG.debug("Block types in cache: " + blockTypesInCache);
assertNull(blockTypesInCache.get(BlockType.ENCODED_DATA));
assertNull(blockTypesInCache.get(BlockType.DATA));
region.close();
blockCache.shutdown();
@@ -260,8 +260,7 @@ public class TestHFileBlockCompatibility {
+ algo + "_" + encoding.toString());
FSDataOutputStream os = fs.create(path);
HFileDataBlockEncoder dataBlockEncoder =
new HFileDataBlockEncoderImpl(encoding, encoding,
TestHFileBlockCompatibility.Writer.DUMMY_HEADER);
new HFileDataBlockEncoderImpl(encoding);
TestHFileBlockCompatibility.Writer hbw =
new TestHFileBlockCompatibility.Writer(algo,
dataBlockEncoder, includesMemstoreTS, includesTag);
@@ -429,7 +428,7 @@ public class TestHFileBlockCompatibility {
.build();
defaultBlockEncodingCtx = new HFileBlockDefaultEncodingContext(null, DUMMY_HEADER, meta);
dataBlockEncodingCtx =
this.dataBlockEncoder.newOnDiskDataBlockEncodingContext(
this.dataBlockEncoder.newDataBlockEncodingContext(
DUMMY_HEADER, meta);
baosInMemory = new ByteArrayOutputStream();
@@ -25,8 +25,6 @@ import java.util.ArrayList;
import java.util.Collection;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.SmallTests;
import org.apache.hadoop.hbase.io.HeapSize;
@@ -36,7 +34,6 @@ import org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext;
import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext;
import org.apache.hadoop.hbase.util.ChecksumType;
import org.apache.hadoop.hbase.util.test.RedundantKVGenerator;
import org.junit.Before;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.runner.RunWith;
@@ -46,9 +43,6 @@ import org.junit.runners.Parameterized.Parameters;
@RunWith(Parameterized.class)
@Category(SmallTests.class)
public class TestHFileDataBlockEncoder {
private Configuration conf;
private final HBaseTestingUtility TEST_UTIL =
new HBaseTestingUtility();
private HFileDataBlockEncoderImpl blockEncoder;
private RedundantKVGenerator generator = new RedundantKVGenerator();
private boolean includesMemstoreTS;
@@ -61,34 +55,26 @@ public class TestHFileDataBlockEncoder {
boolean includesMemstoreTS) {
this.blockEncoder = blockEncoder;
this.includesMemstoreTS = includesMemstoreTS;
System.err.println("On-disk encoding: " + blockEncoder.getEncodingOnDisk()
+ ", in-cache encoding: " + blockEncoder.getEncodingInCache()
System.err.println("Encoding: " + blockEncoder.getDataBlockEncoding()
+ ", includesMemstoreTS: " + includesMemstoreTS);
}

/**
* Preparation before JUnit test.
*/
@Before
public void setUp() {
conf = TEST_UTIL.getConfiguration();
}

/**
* Test putting and taking out blocks into cache with different
* encoding options.
*/
@Test
public void testEncodingWithCache() {
public void testEncodingWithCache() throws IOException {
testEncodingWithCacheInternals(false);
testEncodingWithCacheInternals(true);
}

private void testEncodingWithCacheInternals(boolean useTag) {
private void testEncodingWithCacheInternals(boolean useTag) throws IOException {
HFileBlock block = getSampleHFileBlock(useTag);
HFileBlock cacheBlock = createBlockOnDisk(block, useTag);

LruBlockCache blockCache =
new LruBlockCache(8 * 1024 * 1024, 32 * 1024);
HFileBlock cacheBlock = blockEncoder.diskToCacheFormat(block, false);
BlockCacheKey cacheKey = new BlockCacheKey("test", 0);
blockCache.cacheBlock(cacheKey, cacheBlock);

@@ -97,7 +83,7 @@ public class TestHFileDataBlockEncoder {

HFileBlock returnedBlock = (HFileBlock) heapSize;;

if (blockEncoder.getEncodingInCache() ==
if (blockEncoder.getDataBlockEncoding() ==
DataBlockEncoding.NONE) {
assertEquals(block.getBufferWithHeader(),
returnedBlock.getBufferWithHeader());
@@ -135,15 +121,14 @@ public class TestHFileDataBlockEncoder {
HFileBlock block = new HFileBlock(BlockType.DATA, size, size, -1, buf,
HFileBlock.FILL_HEADER, 0,
0, hfileContext);
HFileBlock cacheBlock = blockEncoder
.diskToCacheFormat(createBlockOnDisk(block, useTags), false);
HFileBlock cacheBlock = createBlockOnDisk(block, useTags);
assertEquals(headerSize, cacheBlock.getDummyHeaderForVersion().length);
}

private HFileBlock createBlockOnDisk(HFileBlock block, boolean useTags) throws IOException {
int size;
HFileBlockEncodingContext context = new HFileBlockDefaultEncodingContext(
blockEncoder.getEncodingOnDisk(),
blockEncoder.getDataBlockEncoding(),
HConstants.HFILEBLOCK_DUMMY_HEADER, block.getHFileContext());
context.setDummyHeader(block.getDummyHeaderForVersion());
blockEncoder.beforeWriteToDisk(block.getBufferWithoutHeader(), context, block.getBlockType());
@@ -155,44 +140,30 @@ public class TestHFileDataBlockEncoder {
}

/**
* Test writing to disk.
* Test encoding.
* @throws IOException
*/
@Test
public void testEncodingWritePath() throws IOException {
testEncodingWritePathInternals(false);
testEncodingWritePathInternals(true);
public void testEncoding() throws IOException {
testEncodingInternals(false);
testEncodingInternals(true);
}

private void testEncodingWritePathInternals(boolean useTag) throws IOException {
private void testEncodingInternals(boolean useTag) throws IOException {
// usually we have just block without headers, but don't complicate that
HFileBlock block = getSampleHFileBlock(useTag);
HFileBlock blockOnDisk = createBlockOnDisk(block, useTag);

if (blockEncoder.getEncodingOnDisk() !=
if (blockEncoder.getDataBlockEncoding() !=
DataBlockEncoding.NONE) {
assertEquals(BlockType.ENCODED_DATA, blockOnDisk.getBlockType());
assertEquals(blockEncoder.getEncodingOnDisk().getId(),
assertEquals(blockEncoder.getDataBlockEncoding().getId(),
blockOnDisk.getDataBlockEncodingId());
} else {
assertEquals(BlockType.DATA, blockOnDisk.getBlockType());
}
}

/**
* Test converting blocks from disk to cache format.
*/
@Test
public void testEncodingReadPath() {
testEncodingReadPathInternals(false);
testEncodingReadPathInternals(true);
}

private void testEncodingReadPathInternals(boolean useTag) {
HFileBlock origBlock = getSampleHFileBlock(useTag);
blockEncoder.diskToCacheFormat(origBlock, false);
}

private HFileBlock getSampleHFileBlock(boolean useTag) {
ByteBuffer keyValues = RedundantKVGenerator.convertKvToByteBuffer(
generator.generateTestKeyValues(60, useTag), includesMemstoreTS);
@@ -224,17 +195,10 @@ public class TestHFileDataBlockEncoder {
new ArrayList<Object[]>();

for (DataBlockEncoding diskAlgo : DataBlockEncoding.values()) {
for (DataBlockEncoding cacheAlgo : DataBlockEncoding.values()) {
if (diskAlgo != cacheAlgo && diskAlgo != DataBlockEncoding.NONE) {
// We allow (1) the same encoding on disk and in cache, and
// (2) some encoding in cache but no encoding on disk (for testing).
continue;
}
for (boolean includesMemstoreTS : new boolean[] {false, true}) {
configurations.add(new Object[] {
new HFileDataBlockEncoderImpl(diskAlgo, cacheAlgo),
new Boolean(includesMemstoreTS)});
}
for (boolean includesMemstoreTS : new boolean[] {false, true}) {
configurations.add(new Object[] {
new HFileDataBlockEncoderImpl(diskAlgo),
new Boolean(includesMemstoreTS)});
}
}
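
The parameter generation above collapses the old disk/cache encoding matrix into one encoder per encoding. A standalone sketch of the same shape (class and method names are illustrative):

    import java.util.ArrayList;
    import java.util.List;

    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl;

    public class ParametersSketch {
      static List<Object[]> encoderParameters() {
        List<Object[]> configurations = new ArrayList<Object[]>();
        for (DataBlockEncoding algo : DataBlockEncoding.values()) {
          for (boolean includesMemstoreTS : new boolean[] { false, true }) {
            // One encoder per encoding; no separate in-cache algorithm any more.
            configurations.add(new Object[] {
                new HFileDataBlockEncoderImpl(algo), includesMemstoreTS });
          }
        }
        return configurations;
      }
    }
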
@@ -50,7 +50,6 @@ import org.apache.hadoop.hbase.io.hfile.HFileBlock;
import org.apache.hadoop.hbase.io.hfile.HFileContext;
import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
import org.apache.hadoop.hbase.io.hfile.HFileReaderV2;
import org.apache.hadoop.hbase.io.hfile.NoOpDataBlockEncoder;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.compress.CompressionOutputStream;
import org.apache.hadoop.io.compress.Compressor;
@@ -594,7 +593,7 @@ public class DataBlockEncodingTool {
CacheConfig cacheConf = new CacheConfig(conf);
FileSystem fs = FileSystem.get(conf);
StoreFile hsf = new StoreFile(fs, path, conf, cacheConf,
BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
BloomType.NONE);

StoreFile.Reader reader = hsf.createReader();
reader.loadFileInfo();
@@ -27,11 +27,8 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
import org.apache.hadoop.hbase.io.hfile.LruBlockCache;
import org.apache.hadoop.hbase.io.hfile.NoOpDataBlockEncoder;

/**
* Test seek performance for encoded data blocks. Read an HFile and do several
@@ -61,8 +58,7 @@ public class EncodedSeekPerformanceTest {

// read all of the key values
StoreFile storeFile = new StoreFile(testingUtility.getTestFileSystem(),
path, configuration, cacheConf, BloomType.NONE,
NoOpDataBlockEncoder.INSTANCE);
path, configuration, cacheConf, BloomType.NONE);

StoreFile.Reader reader = storeFile.createReader();
StoreFileScanner scanner = reader.getStoreFileScanner(true, false);
@@ -88,11 +84,11 @@ public class EncodedSeekPerformanceTest {
return seeks;
}

private void runTest(Path path, HFileDataBlockEncoder blockEncoder,
private void runTest(Path path, DataBlockEncoding blockEncoding,
List<KeyValue> seeks) throws IOException {
// read all of the key values
StoreFile storeFile = new StoreFile(testingUtility.getTestFileSystem(),
path, configuration, cacheConf, BloomType.NONE, blockEncoder);
path, configuration, cacheConf, BloomType.NONE);

long totalSize = 0;

@@ -137,7 +133,7 @@ public class EncodedSeekPerformanceTest {
storeFile.closeReader(cacheConf.shouldEvictOnClose());
clearBlockCache();

System.out.println(blockEncoder);
System.out.println(blockEncoding);
System.out.printf(" Read speed: %8.2f (MB/s)\n", readInMbPerSec);
System.out.printf(" Seeks per second: %8.2f (#/s)\n", seeksPerSec);
System.out.printf(" Total KV size: %d\n", totalSize);
@@ -148,12 +144,12 @@ public class EncodedSeekPerformanceTest {
* @param encoders List of encoders which will be used for tests.
* @throws IOException if there is a bug while reading from disk
*/
public void runTests(Path path, List<HFileDataBlockEncoder> encoders)
public void runTests(Path path, DataBlockEncoding[] encodings)
throws IOException {
List<KeyValue> seeks = prepareListOfTestSeeks(path);

for (HFileDataBlockEncoder blockEncoder : encoders) {
runTest(path, blockEncoder, seeks);
for (DataBlockEncoding blockEncoding : encodings) {
runTest(path, blockEncoding, seeks);
}
}

@@ -169,16 +165,10 @@ public class EncodedSeekPerformanceTest {
}

Path path = new Path(args[0]);
List<HFileDataBlockEncoder> encoders =
new ArrayList<HFileDataBlockEncoder>();

for (DataBlockEncoding encodingAlgo : DataBlockEncoding.values()) {
encoders.add(new HFileDataBlockEncoderImpl(DataBlockEncoding.NONE,
encodingAlgo));
}

// TODO, this test doesn't work as expected any more. Need to fix.
EncodedSeekPerformanceTest utility = new EncodedSeekPerformanceTest();
utility.runTests(path, encoders);
utility.runTests(path, DataBlockEncoding.values());

System.exit(0);
}
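
After the switch, the performance test is driven by plain DataBlockEncoding values rather than encoder instances. A short usage sketch mirroring the call pattern in main() above (the path is a placeholder, and note the TODO in main() stating the test does not currently work as expected):

    // Run the seek benchmark once per available encoding.
    EncodedSeekPerformanceTest utility = new EncodedSeekPerformanceTest();
    utility.runTests(new Path("/tmp/example-hfile"), DataBlockEncoding.values());
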
@@ -61,10 +61,7 @@ import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl;
import org.apache.hadoop.hbase.io.hfile.HFilePrettyPrinter;
import org.apache.hadoop.hbase.io.hfile.NoOpDataBlockEncoder;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.LoadTestTool;
import org.apache.hadoop.hbase.util.MD5Hash;
@@ -146,9 +143,6 @@ public class HFileReadWriteTest {
private int numReadThreads;
private int durationSec;
private DataBlockEncoding dataBlockEncoding;
private boolean encodeInCacheOnly;
private HFileDataBlockEncoder dataBlockEncoder =
NoOpDataBlockEncoder.INSTANCE;

private BloomType bloomType = BloomType.NONE;
private int blockSize;
@@ -194,8 +188,6 @@ public class HFileReadWriteTest {
"reader threads" + Workload.RANDOM_READS.onlyUsedFor());
options.addOption(LoadTestTool.OPT_DATA_BLOCK_ENCODING, true,
LoadTestTool.OPT_DATA_BLOCK_ENCODING_USAGE);
options.addOption(LoadTestTool.OPT_ENCODE_IN_CACHE_ONLY, false,
LoadTestTool.OPT_ENCODE_IN_CACHE_ONLY_USAGE);
options.addOptionGroup(Workload.getOptionGroup());

if (args.length == 0) {
@@ -247,23 +239,9 @@ public class HFileReadWriteTest {
BLOOM_FILTER_OPTION));
}

encodeInCacheOnly =
cmdLine.hasOption(LoadTestTool.OPT_ENCODE_IN_CACHE_ONLY);

if (cmdLine.hasOption(LoadTestTool.OPT_DATA_BLOCK_ENCODING)) {
dataBlockEncoding = DataBlockEncoding.valueOf(
cmdLine.getOptionValue(LoadTestTool.OPT_DATA_BLOCK_ENCODING));
// Optionally encode on disk, always encode in cache.
dataBlockEncoder = new HFileDataBlockEncoderImpl(
encodeInCacheOnly ? DataBlockEncoding.NONE : dataBlockEncoding,
dataBlockEncoding);
} else {
if (encodeInCacheOnly) {
LOG.error("The -" + LoadTestTool.OPT_ENCODE_IN_CACHE_ONLY +
" option does not make sense without -" +
LoadTestTool.OPT_DATA_BLOCK_ENCODING);
return false;
}
}

blockSize = conf.getInt("hfile.min.blocksize.size", 65536);
@@ -463,7 +441,7 @@ public class HFileReadWriteTest {
// We are passing the ROWCOL Bloom filter type, but StoreFile will still
// use the Bloom filter type specified in the HFile.
return new StoreFile(fs, filePath, conf, cacheConf,
BloomType.ROWCOL, dataBlockEncoder);
BloomType.ROWCOL);
}

public static int charToHex(int c) {
@@ -26,7 +26,6 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.KeyValue.KVComparator;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.NoOpDataBlockEncoder;
import org.apache.hadoop.hbase.util.Bytes;

/** A mock used so our tests don't deal with actual StoreFiles */
@@ -41,8 +40,7 @@ public class MockStoreFile extends StoreFile {
MockStoreFile(HBaseTestingUtility testUtil, Path testPath,
long length, long ageInDisk, boolean isRef, long sequenceid) throws IOException {
super(testUtil.getTestFileSystem(), testPath, testUtil.getConfiguration(),
new CacheConfig(testUtil.getConfiguration()), BloomType.NONE,
NoOpDataBlockEncoder.INSTANCE);
new CacheConfig(testUtil.getConfiguration()), BloomType.NONE);
this.length = length;
this.isRef = isRef;
this.ageInDisk = ageInDisk;
@@ -216,7 +216,7 @@ public class TestCacheOnWriteInSchema {
CacheConfig cacheConf = store.getCacheConfig();
BlockCache cache = cacheConf.getBlockCache();
StoreFile sf = new StoreFile(fs, path, conf, cacheConf,
BloomType.ROWCOL, null);
BloomType.ROWCOL);
HFileReaderV2 reader = (HFileReaderV2) sf.createReader().getHFileReader();
try {
// Open a scanner with (on read) caching disabled
@@ -72,7 +72,6 @@ import org.apache.hadoop.hbase.io.hfile.HFileScanner;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionProgress;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
import org.apache.hadoop.hbase.regionserver.compactions.Compactor;
import org.apache.hadoop.hbase.regionserver.compactions.DefaultCompactor;
import org.apache.hadoop.hbase.regionserver.compactions.RatioBasedCompactionPolicy;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
@@ -212,8 +211,7 @@ public class TestCompaction {
final DataBlockEncoding inCache = DataBlockEncoding.PREFIX;
final DataBlockEncoding onDisk = inCacheOnly ? DataBlockEncoding.NONE :
inCache;
store.setDataBlockEncoderInTest(new HFileDataBlockEncoderImpl(
onDisk, inCache));
store.setDataBlockEncoderInTest(new HFileDataBlockEncoderImpl(onDisk));
}

majorCompaction();
@@ -45,7 +45,6 @@ import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileContext;
import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
import org.apache.hadoop.hbase.io.hfile.NoOpDataBlockEncoder;
import org.apache.hadoop.hbase.io.hfile.TestHFileWriterV2;
import org.apache.hadoop.hbase.util.BloomFilterFactory;
import org.apache.hadoop.hbase.util.ByteBloomFilter;
@@ -196,8 +195,7 @@ public class TestCompoundBloomFilter {

private void readStoreFile(int t, BloomType bt, List<KeyValue> kvs,
Path sfPath) throws IOException {
StoreFile sf = new StoreFile(fs, sfPath, conf, cacheConf, bt,
NoOpDataBlockEncoder.INSTANCE);
StoreFile sf = new StoreFile(fs, sfPath, conf, cacheConf, bt);
StoreFile.Reader r = sf.createReader();
final boolean pread = true; // does not really matter
StoreFileScanner scanner = r.getStoreFileScanner(true, pread);
@@ -51,7 +51,6 @@ import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFileContext;
import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
import org.apache.hadoop.hbase.io.hfile.HFileScanner;
import org.apache.hadoop.hbase.io.hfile.NoOpDataBlockEncoder;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.Assume;
import org.junit.Test;
@@ -90,8 +89,7 @@ public class TestFSErrorsExposed {
writer, Bytes.toBytes("cf"), Bytes.toBytes("qual"));

StoreFile sf = new StoreFile(fs, writer.getPath(),
util.getConfiguration(), cacheConf, BloomType.NONE,
NoOpDataBlockEncoder.INSTANCE);
util.getConfiguration(), cacheConf, BloomType.NONE);

StoreFile.Reader reader = sf.createReader();
HFileScanner scanner = reader.getScanner(false, true);
@@ -141,7 +139,7 @@ public class TestFSErrorsExposed {
writer, Bytes.toBytes("cf"), Bytes.toBytes("qual"));

StoreFile sf = new StoreFile(fs, writer.getPath(), util.getConfiguration(),
cacheConf, BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
cacheConf, BloomType.NONE);

List<StoreFileScanner> scanners = StoreFileScanner.getScannersForStoreFiles(
Collections.singletonList(sf), false, true, false,
@@ -148,6 +148,7 @@ public class TestStore extends TestCase {
init(methodName, conf, htd, hcd);
}

@SuppressWarnings("deprecation")
private void init(String methodName, Configuration conf, HTableDescriptor htd,
HColumnDescriptor hcd) throws IOException {
//Setting up a Store
@@ -193,7 +194,7 @@ public class TestStore extends TestCase {
// Verify that compression and encoding settings are respected
HFile.Reader reader = HFile.createReader(fs, path, new CacheConfig(conf));
assertEquals(hcd.getCompressionType(), reader.getCompressionAlgorithm());
assertEquals(hcd.getDataBlockEncoding(), reader.getEncodingOnDisk());
assertEquals(hcd.getDataBlockEncoding(), reader.getDataBlockEncoding());
reader.close();
}

@@ -559,7 +560,7 @@ public class TestStore extends TestCase {

long computedSize=0;
for (KeyValue kv : this.store.memstore.kvset) {
long kvsize = this.store.memstore.heapSizeChange(kv, true);
long kvsize = MemStore.heapSizeChange(kv, true);
//System.out.println(kv + " size= " + kvsize + " kvsize= " + kv.heapSize());
computedSize += kvsize;
}
@ -51,7 +51,6 @@ import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
|
|||
import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
|
||||
import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl;
|
||||
import org.apache.hadoop.hbase.io.hfile.HFileScanner;
|
||||
import org.apache.hadoop.hbase.io.hfile.NoOpDataBlockEncoder;
|
||||
import org.apache.hadoop.hbase.util.BloomFilterFactory;
|
||||
import org.apache.hadoop.hbase.util.Bytes;
|
||||
import org.apache.hadoop.hbase.util.ChecksumType;
|
||||
|
@ -106,7 +105,7 @@ public class TestStoreFile extends HBaseTestCase {
|
|||
|
||||
Path sfPath = regionFs.commitStoreFile(TEST_FAMILY, writer.getPath());
|
||||
StoreFile sf = new StoreFile(this.fs, sfPath, conf, cacheConf,
|
||||
BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
|
||||
BloomType.NONE);
|
||||
checkHalfHFile(regionFs, sf);
|
||||
}
|
||||
|
||||
|
@ -158,7 +157,7 @@ public class TestStoreFile extends HBaseTestCase {
|
|||
|
||||
Path hsfPath = regionFs.commitStoreFile(TEST_FAMILY, writer.getPath());
|
||||
StoreFile hsf = new StoreFile(this.fs, hsfPath, conf, cacheConf,
|
||||
BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
|
||||
BloomType.NONE);
|
||||
StoreFile.Reader reader = hsf.createReader();
|
||||
// Split on a row, not in middle of row. Midkey returned by reader
|
||||
// may be in middle of row. Create new one with empty column and
|
||||
|
@@ -171,7 +170,7 @@ public class TestStoreFile extends HBaseTestCase {
HRegionInfo splitHri = new HRegionInfo(hri.getTable(), null, midRow);
Path refPath = splitStoreFile(regionFs, splitHri, TEST_FAMILY, hsf, midRow, true);
StoreFile refHsf = new StoreFile(this.fs, refPath, conf, cacheConf,
- BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
+ BloomType.NONE);
// Now confirm that I can read from the reference and that it only gets
// keys from top half of the file.
HFileScanner s = refHsf.createReader().getScanner(false, false);
@@ -211,7 +210,7 @@ public class TestStoreFile extends HBaseTestCase {
// Try to open store file from link
StoreFileInfo storeFileInfo = new StoreFileInfo(testConf, this.fs, linkFilePath);
StoreFile hsf = new StoreFile(this.fs, storeFileInfo, testConf, cacheConf,
- BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
+ BloomType.NONE);
assertTrue(storeFileInfo.isLink());

// Now confirm that I can read from the link
@@ -262,8 +261,7 @@ public class TestStoreFile extends HBaseTestCase {
// <root>/clone/splitB/<cf>/<reftohfilelink>
HRegionInfo splitHriA = new HRegionInfo(hri.getTable(), null, SPLITKEY);
HRegionInfo splitHriB = new HRegionInfo(hri.getTable(), SPLITKEY, null);
- StoreFile f = new StoreFile(fs, linkFilePath, testConf, cacheConf, BloomType.NONE,
- NoOpDataBlockEncoder.INSTANCE);
+ StoreFile f = new StoreFile(fs, linkFilePath, testConf, cacheConf, BloomType.NONE);
Path pathA = splitStoreFile(cloneRegionFs, splitHriA, TEST_FAMILY, f, SPLITKEY, true); // top
Path pathB = splitStoreFile(cloneRegionFs, splitHriB, TEST_FAMILY, f, SPLITKEY, false);// bottom

@@ -275,7 +273,7 @@ public class TestStoreFile extends HBaseTestCase {

// Try to open store file from link
StoreFile hsfA = new StoreFile(this.fs, pathA, testConf, cacheConf,
- BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
+ BloomType.NONE);

// Now confirm that I can read from the ref to link
int count = 1;
@@ -288,7 +286,7 @@ public class TestStoreFile extends HBaseTestCase {

// Try to open store file from link
StoreFile hsfB = new StoreFile(this.fs, pathB, testConf, cacheConf,
- BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
+ BloomType.NONE);

// Now confirm that I can read from the ref to link
HFileScanner sB = hsfB.createReader().getScanner(false, false);
@@ -318,10 +316,10 @@ public class TestStoreFile extends HBaseTestCase {
midRow, null);
Path bottomPath = splitStoreFile(regionFs, bottomHri, TEST_FAMILY, f, midRow, false);
// Make readers on top and bottom.
- StoreFile.Reader top = new StoreFile(this.fs, topPath, conf, cacheConf, BloomType.NONE,
- NoOpDataBlockEncoder.INSTANCE).createReader();
- StoreFile.Reader bottom = new StoreFile(this.fs, bottomPath, conf, cacheConf, BloomType.NONE,
- NoOpDataBlockEncoder.INSTANCE).createReader();
+ StoreFile.Reader top = new StoreFile(
+ this.fs, topPath, conf, cacheConf, BloomType.NONE).createReader();
+ StoreFile.Reader bottom = new StoreFile(
+ this.fs, bottomPath, conf, cacheConf, BloomType.NONE).createReader();
ByteBuffer previous = null;
LOG.info("Midkey: " + midKV.toString());
ByteBuffer bbMidkeyBytes = ByteBuffer.wrap(midkey);
@@ -379,8 +377,7 @@ public class TestStoreFile extends HBaseTestCase {

assertNull(bottomPath);

- top = new StoreFile(this.fs, topPath, conf, cacheConf, BloomType.NONE,
- NoOpDataBlockEncoder.INSTANCE).createReader();
+ top = new StoreFile(this.fs, topPath, conf, cacheConf, BloomType.NONE).createReader();
// Now read from the top.
first = true;
topScanner = top.getScanner(false, false);
@@ -414,8 +411,8 @@ public class TestStoreFile extends HBaseTestCase {
topPath = splitStoreFile(regionFs,topHri, TEST_FAMILY, f, badmidkey, true);
bottomPath = splitStoreFile(regionFs, bottomHri, TEST_FAMILY, f, badmidkey, false);
assertNull(topPath);
- bottom = new StoreFile(this.fs, bottomPath, conf, cacheConf, BloomType.NONE,
- NoOpDataBlockEncoder.INSTANCE).createReader();
+ bottom = new StoreFile(this.fs, bottomPath, conf, cacheConf,
+ BloomType.NONE).createReader();
first = true;
bottomScanner = bottom.getScanner(false, false);
while ((!bottomScanner.isSeeked() && bottomScanner.seekTo()) ||
@@ -461,8 +458,7 @@ public class TestStoreFile extends HBaseTestCase {
}
writer.close();

- StoreFile.Reader reader = new StoreFile.Reader(fs, f, cacheConf,
- DataBlockEncoding.NONE);
+ StoreFile.Reader reader = new StoreFile.Reader(fs, f, cacheConf);
reader.loadFileInfo();
reader.loadBloomfilter();
StoreFileScanner scanner = reader.getStoreFileScanner(false, false);
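This hunk and the following ones open a StoreFile.Reader directly without passing a DataBlockEncoding; the reader again takes the encoding from the file. A small sketch under the same assumptions as the test (an existing HFile at Path f, plus fs and cacheConf in scope):

    // No DataBlockEncoding argument: the on-disk encoding is authoritative.
    StoreFile.Reader reader = new StoreFile.Reader(fs, f, cacheConf);
    reader.loadFileInfo();
    reader.loadBloomfilter();
    StoreFileScanner scanner = reader.getStoreFileScanner(false, false);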
@@ -543,7 +539,7 @@ public class TestStoreFile extends HBaseTestCase {
}
writer.close();

- StoreFile.Reader reader = new StoreFile.Reader(fs, f, cacheConf, DataBlockEncoding.NONE);
+ StoreFile.Reader reader = new StoreFile.Reader(fs, f, cacheConf);
reader.loadFileInfo();
reader.loadBloomfilter();

@@ -588,7 +584,7 @@ public class TestStoreFile extends HBaseTestCase {
writeStoreFile(writer);
writer.close();

- StoreFile.Reader reader = new StoreFile.Reader(fs, f, cacheConf, DataBlockEncoding.NONE);
+ StoreFile.Reader reader = new StoreFile.Reader(fs, f, cacheConf);

// Now do reseek with empty KV to position to the beginning of the file

@@ -647,8 +643,7 @@ public class TestStoreFile extends HBaseTestCase {
}
writer.close();

- StoreFile.Reader reader = new StoreFile.Reader(fs, f, cacheConf,
- DataBlockEncoding.NONE);
+ StoreFile.Reader reader = new StoreFile.Reader(fs, f, cacheConf);
reader.loadFileInfo();
reader.loadBloomfilter();
StoreFileScanner scanner = reader.getStoreFileScanner(false, false);
@@ -793,7 +788,7 @@ public class TestStoreFile extends HBaseTestCase {
writer.close();

StoreFile hsf = new StoreFile(this.fs, writer.getPath(), conf, cacheConf,
- BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
+ BloomType.NONE);
StoreFile.Reader reader = hsf.createReader();
StoreFileScanner scanner = reader.getStoreFileScanner(false, false);
TreeSet<byte[]> columns = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
@@ -836,7 +831,7 @@ public class TestStoreFile extends HBaseTestCase {
Path pathCowOff = new Path(baseDir, "123456789");
StoreFile.Writer writer = writeStoreFile(conf, cacheConf, pathCowOff, 3);
StoreFile hsf = new StoreFile(this.fs, writer.getPath(), conf, cacheConf,
- BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
+ BloomType.NONE);
LOG.debug(hsf.getPath().toString());

// Read this file, we should see 3 misses
@@ -858,7 +853,7 @@ public class TestStoreFile extends HBaseTestCase {
Path pathCowOn = new Path(baseDir, "123456788");
writer = writeStoreFile(conf, cacheConf, pathCowOn, 3);
hsf = new StoreFile(this.fs, writer.getPath(), conf, cacheConf,
- BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
+ BloomType.NONE);

// Read this file, we should see 3 hits
reader = hsf.createReader();
@@ -874,13 +869,13 @@ public class TestStoreFile extends HBaseTestCase {

// Let's read back the two files to ensure the blocks exactly match
hsf = new StoreFile(this.fs, pathCowOff, conf, cacheConf,
- BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
+ BloomType.NONE);
StoreFile.Reader readerOne = hsf.createReader();
readerOne.loadFileInfo();
StoreFileScanner scannerOne = readerOne.getStoreFileScanner(true, true);
scannerOne.seek(KeyValue.LOWESTKEY);
hsf = new StoreFile(this.fs, pathCowOn, conf, cacheConf,
- BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
+ BloomType.NONE);
StoreFile.Reader readerTwo = hsf.createReader();
readerTwo.loadFileInfo();
StoreFileScanner scannerTwo = readerTwo.getStoreFileScanner(true, true);
@@ -911,7 +906,7 @@ public class TestStoreFile extends HBaseTestCase {
conf.setBoolean("hbase.rs.evictblocksonclose", true);
cacheConf = new CacheConfig(conf);
hsf = new StoreFile(this.fs, pathCowOff, conf, cacheConf,
- BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
+ BloomType.NONE);
reader = hsf.createReader();
reader.close(cacheConf.shouldEvictOnClose());

@@ -925,7 +920,7 @@ public class TestStoreFile extends HBaseTestCase {
conf.setBoolean("hbase.rs.evictblocksonclose", false);
cacheConf = new CacheConfig(conf);
hsf = new StoreFile(this.fs, pathCowOn, conf, cacheConf,
- BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
+ BloomType.NONE);
reader = hsf.createReader();
reader.close(cacheConf.shouldEvictOnClose());

@@ -995,14 +990,12 @@ public class TestStoreFile extends HBaseTestCase {
DataBlockEncoding.FAST_DIFF;
HFileDataBlockEncoder dataBlockEncoder =
new HFileDataBlockEncoderImpl(
- dataBlockEncoderAlgo,
dataBlockEncoderAlgo);
cacheConf = new CacheConfig(conf);
HFileContext meta = new HFileContextBuilder().withBlockSize(StoreFile.DEFAULT_BLOCKSIZE_SMALL)
.withChecksumType(CKTYPE)
.withBytesPerCheckSum(CKBYTES)
- .withDataBlockEncodingInCache(dataBlockEncoderAlgo)
- .withDataBlockEncodingOnDisk(dataBlockEncoderAlgo)
+ .withDataBlockEncoding(dataBlockEncoderAlgo)
.build();
// Make a store file and write data to it.
StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs)
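With separate in-cache and on-disk settings gone, HFileDataBlockEncoderImpl is built from a single encoding (per the one-argument constructor the hunk leaves behind) and HFileContextBuilder exposes one withDataBlockEncoding setter. A sketch of building the context this way, assuming the test's CKTYPE and CKBYTES checksum constants:

    DataBlockEncoding algo = DataBlockEncoding.FAST_DIFF;
    HFileDataBlockEncoder dataBlockEncoder = new HFileDataBlockEncoderImpl(algo);
    HFileContext meta = new HFileContextBuilder()
        .withBlockSize(StoreFile.DEFAULT_BLOCKSIZE_SMALL)
        .withChecksumType(CKTYPE)
        .withBytesPerCheckSum(CKBYTES)
        .withDataBlockEncoding(algo)   // one setting covers disk and cache
        .build();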
@@ -1013,7 +1006,7 @@ public class TestStoreFile extends HBaseTestCase {
writer.close();

StoreFile storeFile = new StoreFile(fs, writer.getPath(), conf,
- cacheConf, BloomType.NONE, dataBlockEncoder);
+ cacheConf, BloomType.NONE);
StoreFile.Reader reader = storeFile.createReader();

Map<byte[], byte[]> fileInfo = reader.loadFileInfo();

@@ -96,11 +96,6 @@ public class LoadTestTool extends AbstractHBaseTool {
private static final String OPT_COMPRESSION = "compression";
public static final String OPT_DATA_BLOCK_ENCODING =
HColumnDescriptor.DATA_BLOCK_ENCODING.toLowerCase();
- public static final String OPT_ENCODE_IN_CACHE_ONLY =
- "encode_in_cache_only";
- public static final String OPT_ENCODE_IN_CACHE_ONLY_USAGE =
- "If this is specified, data blocks will only be encoded in block " +
- "cache but not on disk";

public static final String OPT_INMEMORY = "in_memory";
public static final String OPT_USAGE_IN_MEMORY = "Tries to keep the HFiles of the CF " +
@@ -143,7 +138,6 @@ public class LoadTestTool extends AbstractHBaseTool {

// Column family options
protected DataBlockEncoding dataBlockEncodingAlgo;
- protected boolean encodeInCacheOnly;
protected Compression.Algorithm compressAlgo;
protected BloomType bloomType;
private boolean inMemoryCF;
@@ -215,7 +209,6 @@ public class LoadTestTool extends AbstractHBaseTool {
}
if (dataBlockEncodingAlgo != null) {
columnDesc.setDataBlockEncoding(dataBlockEncodingAlgo);
- columnDesc.setEncodeOnDisk(!encodeInCacheOnly);
}
if (inMemoryCF) {
columnDesc.setInMemory(inMemoryCF);
@@ -253,7 +246,6 @@ public class LoadTestTool extends AbstractHBaseTool {
"separate puts for every column in a row");
addOptNoArg(OPT_BATCHUPDATE, "Whether to use batch as opposed to " +
"separate updates for every column in a row");
- addOptNoArg(OPT_ENCODE_IN_CACHE_ONLY, OPT_ENCODE_IN_CACHE_ONLY_USAGE);
addOptNoArg(OPT_INMEMORY, OPT_USAGE_IN_MEMORY);
addOptNoArg(OPT_USETAGS, OPT_USAGE_USETAG);
addOptWithArg(OPT_NUM_TAGS, OPT_USAGE_NUM_TAGS + " The default is 1:1");
@@ -307,7 +299,6 @@ public class LoadTestTool extends AbstractHBaseTool {
System.out.println("Key range: [" + startKey + ".." + (endKey - 1) + "]");
}

- encodeInCacheOnly = cmd.hasOption(OPT_ENCODE_IN_CACHE_ONLY);
parseColumnFamilyOptions(cmd);

if (isWrite) {
@@ -381,10 +372,6 @@ public class LoadTestTool extends AbstractHBaseTool {
String dataBlockEncodingStr = cmd.getOptionValue(OPT_DATA_BLOCK_ENCODING);
dataBlockEncodingAlgo = dataBlockEncodingStr == null ? null :
DataBlockEncoding.valueOf(dataBlockEncodingStr);
- if (dataBlockEncodingAlgo == DataBlockEncoding.NONE && encodeInCacheOnly) {
- throw new IllegalArgumentException("-" + OPT_ENCODE_IN_CACHE_ONLY + " " +
- "does not make sense when data block encoding is not used");
- }

String compressStr = cmd.getOptionValue(OPT_COMPRESSION);
compressAlgo = compressStr == null ? Compression.Algorithm.NONE :

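The LoadTestTool hunks retire the -encode_in_cache_only flag along with the setEncodeOnDisk call, leaving the data block encoding as the only knob. A sketch of the remaining option handling, assuming a parsed CommandLine cmd and an HColumnDescriptor columnDesc as in the tool:

    String dataBlockEncodingStr = cmd.getOptionValue(OPT_DATA_BLOCK_ENCODING);
    DataBlockEncoding dataBlockEncodingAlgo = dataBlockEncodingStr == null
        ? null : DataBlockEncoding.valueOf(dataBlockEncodingStr);
    if (dataBlockEncodingAlgo != null) {
      // The chosen encoding now applies both on disk and in the block cache.
      columnDesc.setDataBlockEncoding(dataBlockEncodingAlgo);
    }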
@@ -614,7 +614,6 @@ module Hbase
family.setInMemory(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY)
family.setTimeToLive(JInteger.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::TTL))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::TTL)
family.setDataBlockEncoding(org.apache.hadoop.hbase.io.encoding.DataBlockEncoding.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::DATA_BLOCK_ENCODING))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::DATA_BLOCK_ENCODING)
- family.setEncodeOnDisk(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::ENCODE_ON_DISK))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::ENCODE_ON_DISK)
family.setBlocksize(JInteger.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::BLOCKSIZE))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::BLOCKSIZE)
family.setMaxVersions(JInteger.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::VERSIONS))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::VERSIONS)
family.setMinVersions(JInteger.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::MIN_VERSIONS))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::MIN_VERSIONS)

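After the shell hunk above, ENCODE_ON_DISK supplied to alter/create is no longer pushed onto the column family; only DATA_BLOCK_ENCODING is honored. A hedged sketch of the Java-side equivalent of what the shell now configures, assuming a column family named "f":

    HColumnDescriptor family = new HColumnDescriptor("f");
    // ENCODE_ON_DISK is ignored; this single setting governs disk and cache.
    family.setDataBlockEncoding(DataBlockEncoding.FAST_DIFF);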
@@ -91,7 +91,7 @@ DESCRIPTION
't', {NAME => 'f', DATA_BLOCK_ENCODING => 'NONE', BLOOMFILTER => 'ROW', REPLICATION_ true
SCOPE => '0', VERSIONS => '1', COMPRESSION => 'NONE', MIN_VERSIONS => '0', TTL => '2
147483647', KEEP_DELETED_CELLS => 'false', BLOCKSIZE => '65536', IN_MEMORY => 'false
- ', ENCODE_ON_DISK => 'true', BLOCKCACHE => 'true'}
+ ', BLOCKCACHE => 'true'}
1 row(s) in 1.4430 seconds

hbase(main):004:0> disable 't'
@@ -122,7 +122,7 @@ DESCRIPTION
't', {NAME => 'f', DATA_BLOCK_ENCODING => 'NONE', BLOOMFILTER => 'ROW', REPLICATION_ true
SCOPE => '0', VERSIONS => '1', COMPRESSION => 'NONE', MIN_VERSIONS => '0', TTL => '2
147483647', KEEP_DELETED_CELLS => 'false', BLOCKSIZE => '65536', IN_MEMORY => 'false
- ', ENCODE_ON_DISK => 'true', BLOCKCACHE => 'true'}
+ ', BLOCKCACHE => 'true'}
1 row(s) in 0.0210 seconds
hbase(main):038:0> t.disable
0 row(s) in 6.2350 seconds