HBASE-9546-HFileContext should adopt Builder pattern (Ram)
git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1530567 13f79535-47bb-0310-9956-ffa450edef68
parent 3993047970
commit 7dd240c8ad
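A minimal usage sketch of the builder this patch introduces (not part of the diff below; the builder class, its with* methods, and build() are exactly as added in this commit, while the GZ/64 KB values and the example class name are illustrative assumptions only):

    import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
    import org.apache.hadoop.hbase.io.hfile.HFileContext;
    import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
    import org.apache.hadoop.hbase.util.ChecksumType;

    // Hypothetical example class, for illustration only.
    public class HFileContextBuilderExample {
      public static void main(String[] args) {
        // Setter-style configuration is replaced by a fluent builder chain.
        HFileContext context = new HFileContextBuilder()
            .withCompressionAlgo(Algorithm.GZ)    // illustrative value
            .withBlockSize(64 * 1024)             // illustrative value
            .withChecksumType(ChecksumType.CRC32)
            .withIncludesMvcc(true)
            .withIncludesTags(false)
            .build();
        System.out.println("block size = " + context.getBlocksize());
      }
    }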
@@ -59,27 +59,52 @@ public class HFileContext implements HeapSize, Cloneable {
   //Empty constructor. Go with setters
   public HFileContext() {
   }
+
+  /**
+   * Copy constructor
+   * @param context
+   */
+  public HFileContext(HFileContext context) {
+    this.usesHBaseChecksum = context.usesHBaseChecksum;
+    this.includesMvcc = context.includesMvcc;
+    this.includesTags = context.includesTags;
+    this.compressAlgo = context.compressAlgo;
+    this.compressTags = context.compressTags;
+    this.checksumType = context.checksumType;
+    this.bytesPerChecksum = context.bytesPerChecksum;
+    this.blocksize = context.blocksize;
+    this.encodingOnDisk = context.encodingOnDisk;
+    this.encodingInCache = context.encodingInCache;
+  }
+
+  public HFileContext(boolean useHBaseChecksum, boolean includesMvcc, boolean includesTags,
+      Algorithm compressAlgo, boolean compressTags, ChecksumType checksumType,
+      int bytesPerChecksum, int blockSize, DataBlockEncoding encodingOnDisk,
+      DataBlockEncoding encodingInCache) {
+    this.usesHBaseChecksum = useHBaseChecksum;
+    this.includesMvcc = includesMvcc;
+    this.includesTags = includesTags;
+    this.compressAlgo = compressAlgo;
+    this.compressTags = compressTags;
+    this.checksumType = checksumType;
+    this.bytesPerChecksum = bytesPerChecksum;
+    this.blocksize = blockSize;
+    this.encodingOnDisk = encodingOnDisk;
+    this.encodingInCache = encodingInCache;
+  }
+
   public Algorithm getCompression() {
     return compressAlgo;
   }
 
-  public void setCompressAlgo(Algorithm compressAlgo) {
-    this.compressAlgo = compressAlgo;
-  }
-
   public boolean shouldUseHBaseChecksum() {
     return usesHBaseChecksum;
   }
 
-  public void setUsesHBaseChecksum(boolean usesHBaseChecksum) {
-    this.usesHBaseChecksum = usesHBaseChecksum;
-  }
-
   public boolean shouldIncludeMvcc() {
     return includesMvcc;
   }
 
+  // TODO : This setter should be removed
   public void setIncludesMvcc(boolean includesMvcc) {
     this.includesMvcc = includesMvcc;
   }
@@ -88,6 +113,7 @@ public class HFileContext implements HeapSize, Cloneable {
     return includesTags;
   }
 
+  // TODO : This setter should be removed?
   public void setIncludesTags(boolean includesTags) {
     this.includesTags = includesTags;
   }
@@ -96,50 +122,26 @@ public class HFileContext implements HeapSize, Cloneable {
     return compressTags;
   }
 
-  public void setCompressTags(boolean compressTags) {
-    this.compressTags = compressTags;
-  }
-
   public ChecksumType getChecksumType() {
     return checksumType;
   }
 
-  public void setChecksumType(ChecksumType checksumType) {
-    this.checksumType = checksumType;
-  }
-
   public int getBytesPerChecksum() {
     return bytesPerChecksum;
   }
 
-  public void setBytesPerChecksum(int bytesPerChecksum) {
-    this.bytesPerChecksum = bytesPerChecksum;
-  }
-
   public int getBlocksize() {
     return blocksize;
   }
 
-  public void setBlocksize(int blocksize) {
-    this.blocksize = blocksize;
-  }
-
   public DataBlockEncoding getEncodingOnDisk() {
     return encodingOnDisk;
   }
 
-  public void setEncodingOnDisk(DataBlockEncoding encodingOnDisk) {
-    this.encodingOnDisk = encodingOnDisk;
-  }
-
   public DataBlockEncoding getEncodingInCache() {
     return encodingInCache;
   }
 
-  public void setEncodingInCache(DataBlockEncoding encodingInCache) {
-    this.encodingInCache = encodingInCache;
-  }
-
   /**
    * HeapSize implementation
    * NOTE : The heapsize should be altered as and when new state variable are added
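The copy constructor added above duplicates every field of an existing context; a small sketch of how it can be used (the originating context here is built with the new builder purely for illustration, and the example class name is a hypothetical):

    import org.apache.hadoop.hbase.io.hfile.HFileContext;
    import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;

    // Hypothetical example class, for illustration only.
    public class CopyContextExample {
      public static void main(String[] args) {
        HFileContext original = new HFileContextBuilder().withIncludesTags(true).build();
        // Field-by-field copy; this is what createInCacheEncodingContext() below
        // now uses in place of clone().
        HFileContext copy = new HFileContext(original);
        System.out.println("tags included in copy = " + copy.shouldIncludeTags());
      }
    }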
@@ -0,0 +1,107 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.io.hfile;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
+import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
+import org.apache.hadoop.hbase.util.ChecksumType;
+
+/**
+ * A builder that helps in building up the HFileContext
+ */
+@InterfaceAudience.Private
+public class HFileContextBuilder {
+
+  public static final int DEFAULT_BYTES_PER_CHECKSUM = 16 * 1024;
+  public static final ChecksumType DEFAULT_CHECKSUM_TYPE = ChecksumType.CRC32;
+
+  /** Whether checksum is enabled or not **/
+  private boolean usesHBaseChecksum = true;
+  /** Whether mvcc is to be included in the Read/Write **/
+  private boolean includesMvcc = true;
+  /** Whether tags are to be included in the Read/Write **/
+  private boolean includesTags;
+  /** Compression algorithm used **/
+  private Algorithm compressAlgo = Algorithm.NONE;
+  /** Whether tags to be compressed or not **/
+  private boolean compressTags;
+  /** the checksum type **/
+  private ChecksumType checksumType = DEFAULT_CHECKSUM_TYPE;
+  /** the number of bytes per checksum value **/
+  private int bytesPerChecksum = DEFAULT_BYTES_PER_CHECKSUM;
+  /** Number of uncompressed bytes we allow per block. */
+  private int blocksize = HConstants.DEFAULT_BLOCKSIZE;
+  private DataBlockEncoding encodingOnDisk = DataBlockEncoding.NONE;
+  private DataBlockEncoding encodingInCache = DataBlockEncoding.NONE;
+
+  public HFileContextBuilder withHBaseCheckSum(boolean useHBaseCheckSum) {
+    this.usesHBaseChecksum = useHBaseCheckSum;
+    return this;
+  }
+
+  public HFileContextBuilder withIncludesMvcc(boolean includesMvcc) {
+    this.includesMvcc = includesMvcc;
+    return this;
+  }
+
+  public HFileContextBuilder withIncludesTags(boolean includesTags) {
+    this.includesTags = includesTags;
+    return this;
+  }
+
+  public HFileContextBuilder withCompressionAlgo(Algorithm compressionAlgo) {
+    this.compressAlgo = compressionAlgo;
+    return this;
+  }
+
+  public HFileContextBuilder withCompressTags(boolean compressTags) {
+    this.compressTags = compressTags;
+    return this;
+  }
+
+  public HFileContextBuilder withChecksumType(ChecksumType checkSumType) {
+    this.checksumType = checkSumType;
+    return this;
+  }
+
+  public HFileContextBuilder withBytesPerCheckSum(int bytesPerChecksum) {
+    this.bytesPerChecksum = bytesPerChecksum;
+    return this;
+  }
+
+  public HFileContextBuilder withBlockSize(int blockSize) {
+    this.blocksize = blockSize;
+    return this;
+  }
+
+  public HFileContextBuilder withDataBlockEncodingOnDisk(DataBlockEncoding encodingOnDisk) {
+    this.encodingOnDisk = encodingOnDisk;
+    return this;
+  }
+
+  public HFileContextBuilder withDataBlockEncodingInCache(DataBlockEncoding encodingInCache) {
+    this.encodingInCache = encodingInCache;
+    return this;
+  }
+
+  public HFileContext build() {
+    return new HFileContext(usesHBaseChecksum, includesMvcc, includesTags, compressAlgo,
+        compressTags, checksumType, bytesPerChecksum, blocksize, encodingOnDisk, encodingInCache);
+  }
+}
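Worth noting from the class above: when a field is never set, build() falls back to the builder's defaults (HBase checksums and MVCC enabled, CRC32 over 16 KB chunks, HConstants.DEFAULT_BLOCKSIZE, NONE compression, tags and tag compression off, NONE block encodings), which is what the bare new HFileContextBuilder().build() calls in the test changes further down rely on. A minimal sketch (the example class name is a hypothetical):

    import org.apache.hadoop.hbase.io.hfile.HFileContext;
    import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;

    // Hypothetical example class, for illustration only.
    public class DefaultContextExample {
      public static void main(String[] args) {
        // No with* calls: the resulting context carries the defaults listed above.
        HFileContext defaults = new HFileContextBuilder().build();
        System.out.println("bytes per checksum = " + defaults.getBytesPerChecksum());
        System.out.println("block size         = " + defaults.getBlocksize());
      }
    }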
@@ -239,18 +239,19 @@ public class HFileBlock implements Cacheable {
     onDiskSizeWithoutHeader = b.getInt();
     uncompressedSizeWithoutHeader = b.getInt();
     prevBlockOffset = b.getLong();
-    this.fileContext = new HFileContext();
-    this.fileContext.setUsesHBaseChecksum(usesHBaseChecksum);
+    HFileContextBuilder contextBuilder = new HFileContextBuilder();
+    contextBuilder.withHBaseCheckSum(usesHBaseChecksum);
     if (usesHBaseChecksum) {
-      this.fileContext.setChecksumType(ChecksumType.codeToType(b.get()));
-      this.fileContext.setBytesPerChecksum(b.getInt());
+      contextBuilder.withChecksumType(ChecksumType.codeToType(b.get()));
+      contextBuilder.withBytesPerCheckSum(b.getInt());
       this.onDiskDataSizeWithHeader = b.getInt();
     } else {
-      this.fileContext.setChecksumType(ChecksumType.NULL);
-      this.fileContext.setBytesPerChecksum(0);
+      contextBuilder.withChecksumType(ChecksumType.NULL);
+      contextBuilder.withBytesPerCheckSum(0);
       this.onDiskDataSizeWithHeader = onDiskSizeWithoutHeader +
           HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM;
     }
+    this.fileContext = contextBuilder.build();
     buf = b;
     buf.rewind();
   }
@@ -1019,9 +1020,18 @@ public class HFileBlock implements Cacheable {
    * 0 value in bytesPerChecksum.
    */
   public HFileBlock getBlockForCaching() {
-    HFileContext newContext = fileContext.clone();
-    newContext.setBytesPerChecksum(0);
-    newContext.setChecksumType(ChecksumType.NULL); // no checksums in cached data
+    HFileContext newContext = new HFileContextBuilder()
+        .withBlockSize(fileContext.getBlocksize())
+        .withBytesPerCheckSum(0)
+        .withChecksumType(ChecksumType.NULL) // no checksums in cached data
+        .withCompressionAlgo(fileContext.getCompression())
+        .withDataBlockEncodingInCache(fileContext.getEncodingInCache())
+        .withDataBlockEncodingOnDisk(fileContext.getEncodingOnDisk())
+        .withHBaseCheckSum(fileContext.shouldUseHBaseChecksum())
+        .withCompressTags(fileContext.shouldCompressTags())
+        .withIncludesMvcc(fileContext.shouldIncludeMvcc())
+        .withIncludesTags(fileContext.shouldIncludeTags())
+        .build();
     return new HFileBlock(blockType, getOnDiskSizeWithoutHeader(),
         getUncompressedSizeWithoutHeader(), prevOffset, getUncompressedBufferWithHeader(),
         DONT_FILL_HEADER, startOffset,
@@ -245,14 +245,14 @@ public class HFileDataBlockEncoderImpl implements HFileDataBlockEncoder {
    * See HBASE-8732
    * @return a new in cache encoding context
    */
-  private HFileBlockEncodingContext createInCacheEncodingContext(HFileContext meta) {
-    HFileContext newMeta = meta.clone();
+  private HFileBlockEncodingContext createInCacheEncodingContext(HFileContext fileContext) {
+    HFileContext newContext = new HFileContext(fileContext);
     return (inCache != DataBlockEncoding.NONE) ?
         this.inCache.getEncoder().newDataBlockEncodingContext(
-            this.inCache, dummyHeader, newMeta)
+            this.inCache, dummyHeader, newContext)
         :
         // create a default encoding context
-        new HFileBlockDefaultEncodingContext(this.inCache, dummyHeader, newMeta);
+        new HFileBlockDefaultEncodingContext(this.inCache, dummyHeader, newContext);
   }
 
   @Override
@@ -179,12 +179,12 @@ public class HFileReaderV2 extends AbstractHFileReader {
   }
 
   protected HFileContext createHFileContext(FixedFileTrailer trailer) {
-    HFileContext meta = new HFileContext();
-    meta.setIncludesMvcc(this.includesMemstoreTS);
-    meta.setUsesHBaseChecksum(
-        trailer.getMinorVersion() >= MINOR_VERSION_WITH_CHECKSUM);
-    meta.setCompressAlgo(this.compressAlgo);
-    return meta;
+    HFileContext hFileContext = new HFileContextBuilder()
+      .withIncludesMvcc(this.includesMemstoreTS)
+      .withCompressionAlgo(this.compressAlgo)
+      .withHBaseCheckSum(trailer.getMinorVersion() >= MINOR_VERSION_WITH_CHECKSUM)
+      .build();
+    return hFileContext;
   }
 
   /**
@@ -64,12 +64,13 @@ public class HFileReaderV3 extends HFileReaderV2 {
 
   @Override
   protected HFileContext createHFileContext(FixedFileTrailer trailer) {
-    HFileContext meta = new HFileContext();
-    meta.setIncludesMvcc(this.includesMemstoreTS);
-    meta.setUsesHBaseChecksum(true);
-    meta.setCompressAlgo(this.compressAlgo);
-    meta.setIncludesTags(true);
-    return meta;
+    HFileContext hfileContext = new HFileContextBuilder()
+      .withIncludesMvcc(this.includesMemstoreTS)
+      .withHBaseCheckSum(true)
+      .withCompressionAlgo(this.compressAlgo)
+      .withIncludesTags(true)
+      .build();
+    return hfileContext;
   }
 
   /**
@@ -34,11 +34,8 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValue.KVComparator;
-import org.apache.hadoop.hbase.io.compress.Compression;
-import org.apache.hadoop.hbase.io.hfile.HFileContext;
 import org.apache.hadoop.hbase.io.hfile.HFile.Writer;
 import org.apache.hadoop.hbase.io.hfile.HFileBlock.BlockWritable;
-import org.apache.hadoop.hbase.util.ChecksumType;
 import org.apache.hadoop.hbase.util.BloomFilterWriter;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.io.Writable;
@@ -135,9 +132,6 @@ public class HFileWriterV2 extends AbstractHFileWriter {
   protected HFileBlock.Writer createBlockWriter() {
     // HFile filesystem-level (non-caching) block writer
     hFileContext.setIncludesTags(false);
-    // This can be set while the write is created itself because
-    // in both cases useHBaseChecksum is going to be true
-    hFileContext.setUsesHBaseChecksum(true);
     return new HFileBlock.Writer(blockEncoder, hFileContext);
   }
   /**
@@ -186,7 +186,6 @@ public class HFileWriterV3 extends HFileWriterV2 {
   protected HFileBlock.Writer createBlockWriter() {
     // HFile filesystem-level (non-caching) block writer
     hFileContext.setIncludesTags(true);
-    hFileContext.setUsesHBaseChecksum(true);
     return new HFileBlock.Writer(blockEncoder, hFileContext);
   }
 
@@ -49,9 +49,7 @@ import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.io.hfile.AbstractHFileWriter;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
-import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
-import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl;
-import org.apache.hadoop.hbase.io.hfile.NoOpDataBlockEncoder;
+import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
 import org.apache.hadoop.hbase.regionserver.BloomType;
 import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
@@ -194,18 +192,20 @@ public class HFileOutputFormat extends FileOutputFormat<ImmutableBytesWritable,
           : Integer.parseInt(blockSizeString);
       Configuration tempConf = new Configuration(conf);
       tempConf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.0f);
-      HFileContext meta = new HFileContext();
-      meta.setCompressAlgo(AbstractHFileWriter.compressionByName(compression));
-      meta.setChecksumType(HStore.getChecksumType(conf));
-      meta.setBytesPerChecksum(HStore.getBytesPerChecksum(conf));
-      meta.setBlocksize(blockSize);
-      if (dataBlockEncodingStr != null) {
-        meta.setEncodingInCache(DataBlockEncoding.valueOf(dataBlockEncodingStr));
-        meta.setEncodingOnDisk(DataBlockEncoding.valueOf(dataBlockEncodingStr));
+      HFileContextBuilder contextBuilder = new HFileContextBuilder()
+          .withCompressionAlgo(AbstractHFileWriter.compressionByName(compression))
+          .withChecksumType(HStore.getChecksumType(conf))
+          .withBytesPerCheckSum(HStore.getBytesPerChecksum(conf))
+          .withBlockSize(blockSize);
+      if(dataBlockEncodingStr != null) {
+        contextBuilder.withDataBlockEncodingOnDisk(DataBlockEncoding.valueOf(dataBlockEncodingStr))
+            .withDataBlockEncodingInCache(DataBlockEncoding.valueOf(dataBlockEncodingStr));
       }
+      HFileContext hFileContext = contextBuilder.build();
+
       wl.writer = new StoreFile.WriterBuilder(conf, new CacheConfig(tempConf), fs)
           .withOutputDir(familydir).withBloomType(bloomType).withComparator(KeyValue.COMPARATOR)
-          .withFileContext(meta)
+          .withFileContext(hFileContext)
           .build();
 
       this.writers.put(family, wl);
@@ -71,6 +71,7 @@ import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
+import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.regionserver.BloomType;
@@ -653,19 +654,19 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
     int blocksize = familyDescriptor.getBlocksize();
     Algorithm compression = familyDescriptor.getCompression();
     BloomType bloomFilterType = familyDescriptor.getBloomFilterType();
-    HFileContext meta = new HFileContext();
-    meta.setCompressAlgo(compression);
-    meta.setChecksumType(HStore.getChecksumType(conf));
-    meta.setBytesPerChecksum(HStore.getBytesPerChecksum(conf));
-    meta.setBlocksize(blocksize);
-    meta.setEncodingInCache(familyDescriptor.getDataBlockEncoding());
-    meta.setEncodingOnDisk(familyDescriptor.getDataBlockEncodingOnDisk());
+    HFileContext hFileContext = new HFileContextBuilder()
+        .withCompressionAlgo(compression)
+        .withChecksumType(HStore.getChecksumType(conf))
+        .withBytesPerCheckSum(HStore.getBytesPerChecksum(conf))
+        .withBlockSize(blocksize)
+        .withDataBlockEncodingInCache(familyDescriptor.getDataBlockEncoding())
+        .withDataBlockEncodingOnDisk(familyDescriptor.getDataBlockEncodingOnDisk())
+        .build();
     halfWriter = new StoreFile.WriterBuilder(conf, cacheConf,
         fs)
         .withFilePath(outFile)
         .withBloomType(bloomFilterType)
-        .withFileContext(meta)
+        .withFileContext(hFileContext)
         .build();
     HFileScanner scanner = halfReader.getScanner(false, false, false);
     scanner.seekTo();
@@ -59,6 +59,7 @@ import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
+import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
 import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
 import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
@@ -813,18 +814,20 @@ public class HStore implements Store {
 
   private HFileContext createFileContext(Compression.Algorithm compression,
       boolean includeMVCCReadpoint, boolean includesTag) {
-    HFileContext hFileContext = new HFileContext();
-    hFileContext.setIncludesMvcc(includeMVCCReadpoint);
-    hFileContext.setIncludesTags(includesTag);
     if (compression == null) {
       compression = HFile.DEFAULT_COMPRESSION_ALGORITHM;
     }
-    hFileContext.setCompressAlgo(compression);
-    hFileContext.setChecksumType(checksumType);
-    hFileContext.setBytesPerChecksum(bytesPerChecksum);
-    hFileContext.setBlocksize(blocksize);
-    hFileContext.setEncodingInCache(family.getDataBlockEncoding());
-    hFileContext.setEncodingOnDisk(family.getDataBlockEncodingOnDisk());
+    HFileContext hFileContext = new HFileContextBuilder()
+        .withIncludesMvcc(includeMVCCReadpoint)
+        .withIncludesTags(includesTag)
+        .withCompressionAlgo(compression)
+        .withChecksumType(checksumType)
+        .withBytesPerCheckSum(bytesPerChecksum)
+        .withBlockSize(blocksize)
+        .withHBaseCheckSum(true)
+        .withDataBlockEncodingOnDisk(family.getDataBlockEncodingOnDisk())
+        .withDataBlockEncodingInCache(family.getDataBlockEncoding())
+        .build();
     return hFileContext;
   }
 
@@ -18,9 +18,7 @@
  */
 package org.apache.hadoop.hbase.util;
 
-import java.awt.*;
 import java.io.IOException;
-import java.util.Arrays;
 
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
@@ -32,10 +30,11 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.io.compress.Compression;
+import org.apache.hadoop.hbase.io.hfile.AbstractHFileWriter;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFile;
-import org.apache.hadoop.hbase.io.hfile.AbstractHFileWriter;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
+import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
 import org.apache.hadoop.io.compress.Compressor;
 
 /**
@@ -114,8 +113,8 @@ public class CompressionTest {
   public static void doSmokeTest(FileSystem fs, Path path, String codec)
   throws Exception {
     Configuration conf = HBaseConfiguration.create();
-    HFileContext context = new HFileContext();
-    context.setCompressAlgo(AbstractHFileWriter.compressionByName(codec));
+    HFileContext context = new HFileContextBuilder()
+        .withCompressionAlgo(AbstractHFileWriter.compressionByName(codec)).build();
     HFile.Writer writer = HFile.getWriterFactoryNoCache(conf)
       .withPath(fs, path)
       .withFileContext(context)
@@ -30,10 +30,10 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
+import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
 import org.apache.hadoop.hbase.util.Bytes;
 
@@ -189,8 +189,7 @@ public class HFilePerformanceEvaluation {
 
     @Override
     void setUp() throws Exception {
-      HFileContext hFileContext = new HFileContext();
-      hFileContext.setBlocksize(RFILE_BLOCKSIZE);
+      HFileContext hFileContext = new HFileContextBuilder().withBlockSize(RFILE_BLOCKSIZE).build();
       writer =
         HFile.getWriterFactoryNoCache(conf)
             .withPath(fs, mf)
@@ -37,8 +37,6 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.Coprocessor;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HRegionInfo;
@@ -46,7 +44,10 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.MediumTests;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
@@ -56,17 +57,17 @@ import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.RowMutations;
 import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
+import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
 import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
-import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.regionserver.ScanType;
+import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
@@ -611,7 +612,7 @@ public class TestRegionObserverInterface {
       Configuration conf,
       FileSystem fs, Path path,
       byte[] family, byte[] qualifier) throws IOException {
-    HFileContext context = new HFileContext();
+    HFileContext context = new HFileContextBuilder().build();
     HFile.Writer writer = HFile.getWriterFactory(conf, new CacheConfig(conf))
         .withPath(fs, path)
         .withFileContext(context)
@@ -37,13 +37,13 @@ import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
+import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
-import org.mortbay.log.Log;
 
 @Category(SmallTests.class)
 public class TestHalfStoreFileReader {
@@ -83,8 +83,7 @@ public class TestHalfStoreFileReader {
     Configuration conf = TEST_UTIL.getConfiguration();
     FileSystem fs = FileSystem.get(conf);
     CacheConfig cacheConf = new CacheConfig(conf);
-    HFileContext meta = new HFileContext();
-    meta.setBlocksize(1024);
+    HFileContext meta = new HFileContextBuilder().withBlockSize(1024).build();
     HFile.Writer w = HFile.getWriterFactory(conf, cacheConf)
         .withPath(fs, p)
         .withFileContext(meta)
@@ -149,8 +148,7 @@ public class TestHalfStoreFileReader {
     Configuration conf = TEST_UTIL.getConfiguration();
     FileSystem fs = FileSystem.get(conf);
     CacheConfig cacheConf = new CacheConfig(conf);
-    HFileContext meta = new HFileContext();
-    meta.setBlocksize(1024);
+    HFileContext meta = new HFileContextBuilder().withBlockSize(1024).build();
     HFile.Writer w = HFile.getWriterFactory(conf, cacheConf)
         .withPath(fs, p)
         .withFileContext(meta)
@@ -31,11 +31,12 @@ import java.util.Random;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.Tag;
 import org.apache.hadoop.hbase.KeyValue.Type;
 import org.apache.hadoop.hbase.LargeTests;
+import org.apache.hadoop.hbase.Tag;
 import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
+import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.test.RedundantKVGenerator;
 import org.junit.Test;
@@ -76,11 +77,11 @@ public class TestDataBlockEncoders {
   private HFileBlockEncodingContext getEncodingContext(Compression.Algorithm algo,
       DataBlockEncoding encoding) {
     DataBlockEncoder encoder = encoding.getEncoder();
-    HFileContext meta = new HFileContext();
-    meta.setUsesHBaseChecksum(false);
-    meta.setIncludesMvcc(includesMemstoreTS);
-    meta.setIncludesTags(includesTags);
-    meta.setCompressAlgo(algo);
+    HFileContext meta = new HFileContextBuilder()
+        .withHBaseCheckSum(false)
+        .withIncludesMvcc(includesMemstoreTS)
+        .withIncludesTags(includesTags)
+        .withCompressionAlgo(algo).build();
     if (encoder != null) {
       return encoder.newDataBlockEncodingContext(encoding,
           HConstants.HFILEBLOCK_DUMMY_HEADER, meta);
@@ -114,11 +115,11 @@ public class TestDataBlockEncoders {
     DataInputStream dis = new DataInputStream(bais);
     ByteBuffer actualDataset;
     DataBlockEncoder encoder = encoding.getEncoder();
-    HFileContext meta = new HFileContext();
-    meta.setUsesHBaseChecksum(false);
-    meta.setIncludesMvcc(includesMemstoreTS);
-    meta.setIncludesTags(includesTags);
-    meta.setCompressAlgo(Compression.Algorithm.NONE);
+    HFileContext meta = new HFileContextBuilder()
+        .withHBaseCheckSum(false)
+        .withIncludesMvcc(includesMemstoreTS)
+        .withIncludesTags(includesTags)
+        .withCompressionAlgo(Compression.Algorithm.NONE).build();
     actualDataset = encoder.decodeKeyValues(dis, encoder.newDataBlockDecodingContext(meta));
     dataset.rewind();
     actualDataset.rewind();
@@ -219,11 +220,12 @@ public class TestDataBlockEncoders {
 
     ByteBuffer encodedBuffer = ByteBuffer.wrap(encodeBytes(encoding, originalBuffer));
     DataBlockEncoder encoder = encoding.getEncoder();
-    HFileContext meta = new HFileContext();
-    meta.setUsesHBaseChecksum(false);
-    meta.setIncludesMvcc(includesMemstoreTS);
-    meta.setIncludesTags(includesTags);
-    meta.setCompressAlgo(Compression.Algorithm.NONE);
+    HFileContext meta = new HFileContextBuilder()
+        .withHBaseCheckSum(false)
+        .withIncludesMvcc(includesMemstoreTS)
+        .withIncludesTags(includesTags)
+        .withCompressionAlgo(Compression.Algorithm.NONE)
+        .build();
     DataBlockEncoder.EncodedSeeker seeker = encoder.createSeeker(KeyValue.COMPARATOR,
         encoder.newDataBlockDecodingContext(meta));
     seeker.setCurrentBuffer(encodedBuffer);
@@ -274,11 +276,12 @@ public class TestDataBlockEncoders {
       throw new RuntimeException(String.format("Bug while encoding using '%s'",
           encoder.toString()), e);
     }
-    HFileContext meta = new HFileContext();
-    meta.setUsesHBaseChecksum(false);
-    meta.setIncludesMvcc(includesMemstoreTS);
-    meta.setIncludesTags(includesTags);
-    meta.setCompressAlgo(Compression.Algorithm.NONE);
+    HFileContext meta = new HFileContextBuilder()
+        .withHBaseCheckSum(false)
+        .withIncludesMvcc(includesMemstoreTS)
+        .withIncludesTags(includesTags)
+        .withCompressionAlgo(Compression.Algorithm.NONE)
+        .build();
     DataBlockEncoder.EncodedSeeker seeker = encoder.createSeeker(KeyValue.COMPARATOR,
         encoder.newDataBlockDecodingContext(meta));
     seeker.setCurrentBuffer(encodedBuffer);
@@ -42,6 +42,7 @@ import org.apache.hadoop.hbase.codec.prefixtree.PrefixTreeCodec;
 import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoder.EncodedSeeker;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
+import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CollectionBackedScanner;
 import org.junit.Assert;
@@ -96,11 +97,11 @@ public class TestPrefixTreeEncoding {
     PrefixTreeCodec encoder = new PrefixTreeCodec();
     int batchId = numBatchesWritten++;
     ByteBuffer dataBuffer = generateFixedTestData(kvset, batchId, false, includesTag);
-    HFileContext meta = new HFileContext();
-    meta.setUsesHBaseChecksum(false);
-    meta.setIncludesMvcc(false);
-    meta.setIncludesTags(includesTag);
-    meta.setCompressAlgo(Algorithm.NONE);
+    HFileContext meta = new HFileContextBuilder()
+        .withHBaseCheckSum(false)
+        .withIncludesMvcc(false)
+        .withIncludesTags(includesTag)
+        .withCompressionAlgo(Algorithm.NONE).build();
     HFileBlockEncodingContext blkEncodingCtx = new HFileBlockDefaultEncodingContext(
         DataBlockEncoding.PREFIX_TREE, new byte[0], meta);
     encoder.encodeKeyValues(dataBuffer, blkEncodingCtx);
@@ -137,11 +138,12 @@ public class TestPrefixTreeEncoding {
   public void testScanWithRandomData() throws Exception {
     PrefixTreeCodec encoder = new PrefixTreeCodec();
     ByteBuffer dataBuffer = generateRandomTestData(kvset, numBatchesWritten++, includesTag);
-    HFileContext meta = new HFileContext();
-    meta.setUsesHBaseChecksum(false);
-    meta.setIncludesMvcc(false);
-    meta.setIncludesTags(includesTag);
-    meta.setCompressAlgo(Algorithm.NONE);
+    HFileContext meta = new HFileContextBuilder()
+        .withHBaseCheckSum(false)
+        .withIncludesMvcc(false)
+        .withIncludesTags(includesTag)
+        .withCompressionAlgo(Algorithm.NONE)
+        .build();
     HFileBlockEncodingContext blkEncodingCtx = new HFileBlockDefaultEncodingContext(
         DataBlockEncoding.PREFIX_TREE, new byte[0], meta);
     encoder.encodeKeyValues(dataBuffer, blkEncodingCtx);
@@ -173,11 +175,12 @@ public class TestPrefixTreeEncoding {
     PrefixTreeCodec encoder = new PrefixTreeCodec();
     int batchId = numBatchesWritten++;
     ByteBuffer dataBuffer = generateRandomTestData(kvset, batchId, includesTag);
-    HFileContext meta = new HFileContext();
-    meta.setUsesHBaseChecksum(false);
-    meta.setIncludesMvcc(false);
-    meta.setIncludesTags(includesTag);
-    meta.setCompressAlgo(Algorithm.NONE);
+    HFileContext meta = new HFileContextBuilder()
+        .withHBaseCheckSum(false)
+        .withIncludesMvcc(false)
+        .withIncludesTags(includesTag)
+        .withCompressionAlgo(Algorithm.NONE)
+        .build();
     HFileBlockEncodingContext blkEncodingCtx = new HFileBlockDefaultEncodingContext(
         DataBlockEncoding.PREFIX_TREE, new byte[0], meta);
     encoder.encodeKeyValues(dataBuffer, blkEncodingCtx);
@@ -194,11 +197,12 @@ public class TestPrefixTreeEncoding {
     PrefixTreeCodec encoder = new PrefixTreeCodec();
     int batchId = numBatchesWritten++;
     ByteBuffer dataBuffer = generateFixedTestData(kvset, batchId, includesTag);
-    HFileContext meta = new HFileContext();
-    meta.setUsesHBaseChecksum(false);
-    meta.setIncludesMvcc(false);
-    meta.setIncludesTags(includesTag);
-    meta.setCompressAlgo(Algorithm.NONE);
+    HFileContext meta = new HFileContextBuilder()
+        .withHBaseCheckSum(false)
+        .withIncludesMvcc(false)
+        .withIncludesTags(includesTag)
+        .withCompressionAlgo(Algorithm.NONE)
+        .build();
     HFileBlockEncodingContext blkEncodingCtx = new HFileBlockDefaultEncodingContext(
         DataBlockEncoding.PREFIX_TREE, new byte[0], meta);
     encoder.encodeKeyValues(dataBuffer, blkEncodingCtx);
@@ -38,7 +38,6 @@ import org.apache.hadoop.hbase.MultithreadedTestUtil;
 import org.apache.hadoop.hbase.MultithreadedTestUtil.TestThread;
 import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.io.compress.Compression;
-import org.apache.hadoop.hbase.io.hfile.HFileContext;
 import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache;
 import org.apache.hadoop.hbase.util.ChecksumType;
 
@@ -341,13 +340,14 @@ public class CacheTestUtils {
       cachedBuffer.putInt(uncompressedSizeWithoutHeader);
       cachedBuffer.putLong(prevBlockOffset);
       cachedBuffer.rewind();
-      HFileContext meta = new HFileContext();
-      meta.setUsesHBaseChecksum(false);
-      meta.setIncludesMvcc(includesMemstoreTS);
-      meta.setIncludesTags(false);
-      meta.setCompressAlgo(Compression.Algorithm.NONE);
-      meta.setBytesPerChecksum(0);
-      meta.setChecksumType(ChecksumType.NULL);
+      HFileContext meta = new HFileContextBuilder()
+          .withHBaseCheckSum(false)
+          .withIncludesMvcc(includesMemstoreTS)
+          .withIncludesTags(false)
+          .withCompressionAlgo(Compression.Algorithm.NONE)
+          .withBytesPerCheckSum(0)
+          .withChecksumType(ChecksumType.NULL)
+          .build();
       HFileBlock generated = new HFileBlock(BlockType.DATA,
           onDiskSizeWithoutHeader, uncompressedSizeWithoutHeader,
           prevBlockOffset, cachedBuffer, HFileBlock.DONT_FILL_HEADER,
@@ -307,13 +307,10 @@ public class TestCacheOnWrite {
     }
     Path storeFileParentDir = new Path(TEST_UTIL.getDataTestDir(),
         "test_cache_on_write");
-    HFileContext meta = new HFileContext();
-    meta.setCompressAlgo(compress);
-    meta.setChecksumType(CKTYPE);
-    meta.setBytesPerChecksum(CKBYTES);
-    meta.setBlocksize(DATA_BLOCK_SIZE);
-    meta.setEncodingInCache(encoder.getEncodingInCache());
-    meta.setEncodingOnDisk(encoder.getEncodingOnDisk());
+    HFileContext meta = new HFileContextBuilder().withCompressionAlgo(compress)
+        .withBytesPerCheckSum(CKBYTES).withChecksumType(ChecksumType.NULL)
+        .withBlockSize(DATA_BLOCK_SIZE).withDataBlockEncodingInCache(encoder.getEncodingInCache())
+        .withDataBlockEncodingOnDisk(encoder.getEncodingOnDisk()).build();
     StoreFile.Writer sfw = new StoreFile.WriterBuilder(conf, cacheConf, fs)
         .withOutputDir(storeFileParentDir).withComparator(KeyValue.COMPARATOR)
         .withFileContext(meta)
@@ -19,11 +19,13 @@
  */
 package org.apache.hadoop.hbase.io.hfile;
 
-import static org.junit.Assert.*;
+import static org.apache.hadoop.hbase.io.compress.Compression.Algorithm.GZ;
+import static org.apache.hadoop.hbase.io.compress.Compression.Algorithm.NONE;
+import static org.junit.Assert.assertEquals;
 
 import java.io.ByteArrayInputStream;
-import java.io.DataOutputStream;
 import java.io.DataInputStream;
+import java.io.DataOutputStream;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 
@@ -37,14 +39,9 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.SmallTests;
 import org.apache.hadoop.hbase.fs.HFileSystem;
-import org.apache.hadoop.hbase.io.compress.Compression;
-import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
-import org.apache.hadoop.hbase.io.hfile.HFileContext;
 import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
+import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.util.ChecksumType;
-
-import static org.apache.hadoop.hbase.io.compress.Compression.Algorithm.*;
-
 import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -92,12 +89,13 @@ public class TestChecksum {
 Path path = new Path(TEST_UTIL.getDataTestDir(), "blocks_v2_"
 + algo);
 FSDataOutputStream os = fs.create(path);
-HFileContext meta = new HFileContext();
-meta.setCompressAlgo(algo);
-meta.setIncludesMvcc(true);
-meta.setIncludesTags(useTags);
-meta.setChecksumType(HFile.DEFAULT_CHECKSUM_TYPE);
-meta.setBytesPerChecksum(HFile.DEFAULT_BYTES_PER_CHECKSUM);
+HFileContext meta = new HFileContextBuilder()
+.withCompressionAlgo(algo)
+.withIncludesMvcc(true)
+.withIncludesTags(useTags)
+.withChecksumType(HFile.DEFAULT_CHECKSUM_TYPE)
+.withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM)
+.build();
 HFileBlock.Writer hbw = new HFileBlock.Writer(null, meta);
 long totalSize = 0;
 for (int blockId = 0; blockId < 2; ++blockId) {
@@ -114,11 +112,12 @@ public class TestChecksum {
 
 // Do a read that purposely introduces checksum verification failures.
 FSDataInputStreamWrapper is = new FSDataInputStreamWrapper(fs, path);
-meta = new HFileContext();
-meta.setCompressAlgo(algo);
-meta.setIncludesMvcc(true);
-meta.setIncludesTags(useTags);
-meta.setUsesHBaseChecksum(true);
+meta = new HFileContextBuilder()
+.withCompressionAlgo(algo)
+.withIncludesMvcc(true)
+.withIncludesTags(useTags)
+.withHBaseCheckSum(true)
+.build();
 HFileBlock.FSReader hbr = new FSReaderV2Test(is, totalSize, fs, path, meta);
 HFileBlock b = hbr.readBlockData(0, -1, -1, pread);
 b.sanityCheck();
@@ -197,13 +196,14 @@ public class TestChecksum {
 Path path = new Path(TEST_UTIL.getDataTestDir(), "checksumChunk_" +
 algo + bytesPerChecksum);
 FSDataOutputStream os = fs.create(path);
-HFileContext meta = new HFileContext();
-meta.setCompressAlgo(algo);
-meta.setIncludesMvcc(true);
-meta.setIncludesTags(useTags);
-meta.setUsesHBaseChecksum(true);
-meta.setBytesPerChecksum(bytesPerChecksum);
-meta.setChecksumType(HFile.DEFAULT_CHECKSUM_TYPE);
+HFileContext meta = new HFileContextBuilder()
+.withCompressionAlgo(algo)
+.withIncludesMvcc(true)
+.withIncludesTags(useTags)
+.withHBaseCheckSum(true)
+.withBytesPerCheckSum(bytesPerChecksum)
+.withChecksumType(HFile.DEFAULT_CHECKSUM_TYPE)
+.build();
 HFileBlock.Writer hbw = new HFileBlock.Writer(null,
 meta);
 
@@ -236,12 +236,13 @@ public class TestChecksum {
 // Read data back from file.
 FSDataInputStream is = fs.open(path);
 FSDataInputStream nochecksum = hfs.getNoChecksumFs().open(path);
-meta = new HFileContext();
-meta.setCompressAlgo(algo);
-meta.setIncludesMvcc(true);
-meta.setIncludesTags(useTags);
-meta.setUsesHBaseChecksum(true);
-meta.setBytesPerChecksum(bytesPerChecksum);
+meta = new HFileContextBuilder()
+.withCompressionAlgo(algo)
+.withIncludesMvcc(true)
+.withIncludesTags(useTags)
+.withHBaseCheckSum(true)
+.withBytesPerCheckSum(bytesPerChecksum)
+.build();
 HFileBlock.FSReader hbr = new HFileBlock.FSReaderV2(new FSDataInputStreamWrapper(
 is, nochecksum), totalSize, hfs, path, meta);
 HFileBlock b = hbr.readBlockData(0, -1, -1, pread);
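TestChecksum rebuilds the context for its read path with HBase checksums switched on. A hedged sketch of that reader-side configuration, reusing only calls visible in the hunks above (algo, is and totalSize are assumed to be in scope, as in the test):

    HFileContext readContext = new HFileContextBuilder()
        .withHBaseCheckSum(true)                                 // verify HBase-level checksums
        .withChecksumType(HFile.DEFAULT_CHECKSUM_TYPE)           // default checksum algorithm
        .withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM)  // checksum chunk size
        .withCompressionAlgo(algo)                               // must match what the writer used
        .build();
    HFileBlock.FSReader reader = new HFileBlock.FSReaderV2(is, totalSize, readContext);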
@@ -83,8 +83,7 @@ public class TestHFile extends HBaseTestCase {
 public void testEmptyHFile() throws IOException {
 if (cacheConf == null) cacheConf = new CacheConfig(conf);
 Path f = new Path(ROOT_DIR, getName());
-HFileContext context = new HFileContext();
-context.setIncludesTags(false);
+HFileContext context = new HFileContextBuilder().withIncludesTags(false).build();
 Writer w =
 HFile.getWriterFactory(conf, cacheConf).withPath(fs, f).withFileContext(context).create();
 w.close();
@@ -133,7 +132,7 @@ public class TestHFile extends HBaseTestCase {
 public void testCorruptTruncatedHFile() throws IOException {
 if (cacheConf == null) cacheConf = new CacheConfig(conf);
 Path f = new Path(ROOT_DIR, getName());
-HFileContext context = new HFileContext();
+HFileContext context = new HFileContextBuilder().build();
 Writer w = HFile.getWriterFactory(conf, cacheConf).withPath(this.fs, f)
 .withFileContext(context).create();
 writeSomeRecords(w, 0, 100, false);
@@ -224,9 +223,10 @@ public class TestHFile extends HBaseTestCase {
 if (cacheConf == null) cacheConf = new CacheConfig(conf);
 Path ncTFile = new Path(ROOT_DIR, "basic.hfile." + codec.toString() + useTags);
 FSDataOutputStream fout = createFSOutput(ncTFile);
-HFileContext meta = new HFileContext();
-meta.setBlocksize(minBlockSize);
-meta.setCompressAlgo(AbstractHFileWriter.compressionByName(codec));
+HFileContext meta = new HFileContextBuilder()
+.withBlockSize(minBlockSize)
+.withCompressionAlgo(AbstractHFileWriter.compressionByName(codec))
+.build();
 Writer writer = HFile.getWriterFactory(conf, cacheConf)
 .withOutputStream(fout)
 .withFileContext(meta)
@@ -313,9 +313,9 @@ public class TestHFile extends HBaseTestCase {
 if (cacheConf == null) cacheConf = new CacheConfig(conf);
 Path mFile = new Path(ROOT_DIR, "meta.hfile");
 FSDataOutputStream fout = createFSOutput(mFile);
-HFileContext meta = new HFileContext();
-meta.setCompressAlgo(AbstractHFileWriter.compressionByName(compress));
-meta.setBlocksize(minBlockSize);
+HFileContext meta = new HFileContextBuilder()
+.withCompressionAlgo(AbstractHFileWriter.compressionByName(compress))
+.withBlockSize(minBlockSize).build();
 Writer writer = HFile.getWriterFactory(conf, cacheConf)
 .withOutputStream(fout)
 .withFileContext(meta)
@@ -347,9 +347,8 @@ public class TestHFile extends HBaseTestCase {
 HBaseTestingUtility.COMPRESSION_ALGORITHMS) {
 Path mFile = new Path(ROOT_DIR, "nometa_" + compressAlgo + ".hfile");
 FSDataOutputStream fout = createFSOutput(mFile);
-HFileContext meta = new HFileContext();
-meta.setCompressAlgo((compressAlgo));
-meta.setBlocksize(minBlockSize);
+HFileContext meta = new HFileContextBuilder().withCompressionAlgo(compressAlgo)
+.withBlockSize(minBlockSize).build();
 Writer writer = HFile.getWriterFactory(conf, cacheConf)
 .withOutputStream(fout)
 .withFileContext(meta)
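Most TestHFile cases need only a default or single-field context, which the builder reduces to one statement. Sketch (assuming the builder defaults are acceptable for the test, as these hunks do):

    HFileContext defaults = new HFileContextBuilder().build();                          // all defaults
    HFileContext tagless  = new HFileContextBuilder().withIncludesTags(false).build();  // one override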
@@ -218,12 +218,13 @@ public class TestHFileBlock {
 static HFileBlock.Writer createTestV2Block(Compression.Algorithm algo,
 boolean includesMemstoreTS, boolean includesTag) throws IOException {
 final BlockType blockType = BlockType.DATA;
-HFileContext meta = new HFileContext();
-meta.setCompressAlgo(algo);
-meta.setIncludesMvcc(includesMemstoreTS);
-meta.setIncludesTags(includesTag);
-meta.setBytesPerChecksum(HFile.DEFAULT_BYTES_PER_CHECKSUM);
-meta.setChecksumType(HFile.DEFAULT_CHECKSUM_TYPE);
+HFileContext meta = new HFileContextBuilder()
+.withCompressionAlgo(algo)
+.withIncludesMvcc(includesMemstoreTS)
+.withIncludesTags(includesTag)
+.withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM)
+.withChecksumType(HFile.DEFAULT_CHECKSUM_TYPE)
+.build();
 HFileBlock.Writer hbw = new HFileBlock.Writer(null, meta);
 DataOutputStream dos = hbw.startWriting(blockType);
 writeTestBlockContents(dos);
@@ -301,12 +302,13 @@ public class TestHFileBlock {
 Path path = new Path(TEST_UTIL.getDataTestDir(), "blocks_v2_"
 + algo);
 FSDataOutputStream os = fs.create(path);
-HFileContext meta = new HFileContext();
-meta.setCompressAlgo(algo);
-meta.setIncludesMvcc(includesMemstoreTS);
-meta.setIncludesTags(includesTag);
-meta.setBytesPerChecksum(HFile.DEFAULT_BYTES_PER_CHECKSUM);
-meta.setChecksumType(HFile.DEFAULT_CHECKSUM_TYPE);
+HFileContext meta = new HFileContextBuilder()
+.withCompressionAlgo(algo)
+.withIncludesMvcc(includesMemstoreTS)
+.withIncludesTags(includesTag)
+.withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM)
+.withChecksumType(HFile.DEFAULT_CHECKSUM_TYPE)
+.build();
 HFileBlock.Writer hbw = new HFileBlock.Writer(null,
 meta);
 long totalSize = 0;
@@ -320,11 +322,11 @@ public class TestHFileBlock {
 os.close();
 
 FSDataInputStream is = fs.open(path);
-meta = new HFileContext();
-meta.setUsesHBaseChecksum(true);
-meta.setIncludesMvcc(includesMemstoreTS);
-meta.setIncludesTags(includesTag);
-meta.setCompressAlgo(algo);
+meta = new HFileContextBuilder()
+.withHBaseCheckSum(true)
+.withIncludesMvcc(includesMemstoreTS)
+.withIncludesTags(includesTag)
+.withCompressionAlgo(algo).build();
 HFileBlock.FSReader hbr = new HFileBlock.FSReaderV2(is, totalSize, meta);
 HFileBlock b = hbr.readBlockData(0, -1, -1, pread);
 is.close();
@@ -383,12 +385,13 @@ public class TestHFileBlock {
 FSDataOutputStream os = fs.create(path);
 HFileDataBlockEncoder dataBlockEncoder =
 new HFileDataBlockEncoderImpl(encoding);
-HFileContext meta = new HFileContext();
-meta.setCompressAlgo(algo);
-meta.setIncludesMvcc(includesMemstoreTS);
-meta.setIncludesTags(includesTag);
-meta.setBytesPerChecksum(HFile.DEFAULT_BYTES_PER_CHECKSUM);
-meta.setChecksumType(HFile.DEFAULT_CHECKSUM_TYPE);
+HFileContext meta = new HFileContextBuilder()
+.withCompressionAlgo(algo)
+.withIncludesMvcc(includesMemstoreTS)
+.withIncludesTags(includesTag)
+.withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM)
+.withChecksumType(HFile.DEFAULT_CHECKSUM_TYPE)
+.build();
 HFileBlock.Writer hbw = new HFileBlock.Writer(dataBlockEncoder,
 meta);
 long totalSize = 0;
@@ -404,11 +407,12 @@ public class TestHFileBlock {
 os.close();
 
 FSDataInputStream is = fs.open(path);
-meta = new HFileContext();
-meta.setUsesHBaseChecksum(true);
-meta.setIncludesMvcc(includesMemstoreTS);
-meta.setIncludesTags(includesTag);
-meta.setCompressAlgo(algo);
+meta = new HFileContextBuilder()
+.withHBaseCheckSum(true)
+.withCompressionAlgo(algo)
+.withIncludesMvcc(includesMemstoreTS)
+.withIncludesTags(includesTag)
+.build();
 HFileBlock.FSReaderV2 hbr = new HFileBlock.FSReaderV2(is, totalSize, meta);
 hbr.setDataBlockEncoder(dataBlockEncoder);
 hbr.setIncludesMemstoreTS(includesMemstoreTS);
@@ -457,10 +461,11 @@ public class TestHFileBlock {
 DataBlockEncoder encoder = encoding.getEncoder();
 int headerLen = dummyHeader.length;
 byte[] encodedResultWithHeader = null;
-HFileContext meta = new HFileContext();
-meta.setCompressAlgo(algo);
-meta.setIncludesMvcc(includesMemstoreTS);
-meta.setIncludesTags(useTag);
+HFileContext meta = new HFileContextBuilder()
+.withCompressionAlgo(algo)
+.withIncludesMvcc(includesMemstoreTS)
+.withIncludesTags(useTag)
+.build();
 if (encoder != null) {
 HFileBlockEncodingContext encodingCtx = encoder.newDataBlockEncodingContext(encoding,
 dummyHeader, meta);
@@ -550,11 +555,11 @@ public class TestHFileBlock {
 expectedPrevOffsets, expectedTypes, expectedContents);
 
 FSDataInputStream is = fs.open(path);
-HFileContext meta = new HFileContext();
-meta.setUsesHBaseChecksum(true);
-meta.setIncludesMvcc(includesMemstoreTS);
-meta.setIncludesTags(includesTag);
-meta.setCompressAlgo(algo);
+HFileContext meta = new HFileContextBuilder()
+.withHBaseCheckSum(true)
+.withIncludesMvcc(includesMemstoreTS)
+.withIncludesTags(includesTag)
+.withCompressionAlgo(algo).build();
 HFileBlock.FSReader hbr = new HFileBlock.FSReaderV2(is, totalSize, meta);
 long curOffset = 0;
 for (int i = 0; i < NUM_TEST_BLOCKS; ++i) {
@@ -733,11 +738,12 @@ public class TestHFileBlock {
 writeBlocks(rand, compressAlgo, path, offsets, null, types, null);
 FSDataInputStream is = fs.open(path);
 long fileSize = fs.getFileStatus(path).getLen();
-HFileContext meta = new HFileContext();
-meta.setUsesHBaseChecksum(true);
-meta.setIncludesMvcc(includesMemstoreTS);
-meta.setIncludesTags(includesTag);
-meta.setCompressAlgo(compressAlgo);
+HFileContext meta = new HFileContextBuilder()
+.withHBaseCheckSum(true)
+.withIncludesMvcc(includesMemstoreTS)
+.withIncludesTags(includesTag)
+.withCompressionAlgo(compressAlgo)
+.build();
 HFileBlock.FSReader hbr = new HFileBlock.FSReaderV2(is, fileSize, meta);
 
 Executor exec = Executors.newFixedThreadPool(NUM_READER_THREADS);
@@ -769,13 +775,14 @@ public class TestHFileBlock {
 ) throws IOException {
 boolean cacheOnWrite = expectedContents != null;
 FSDataOutputStream os = fs.create(path);
-HFileContext meta = new HFileContext();
-meta.setUsesHBaseChecksum(true);
-meta.setIncludesMvcc(includesMemstoreTS);
-meta.setIncludesTags(includesTag);
-meta.setCompressAlgo(compressAlgo);
-meta.setBytesPerChecksum(HFile.DEFAULT_BYTES_PER_CHECKSUM);
-meta.setChecksumType(HFile.DEFAULT_CHECKSUM_TYPE);
+HFileContext meta = new HFileContextBuilder()
+.withHBaseCheckSum(true)
+.withIncludesMvcc(includesMemstoreTS)
+.withIncludesTags(includesTag)
+.withCompressionAlgo(compressAlgo)
+.withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM)
+.withChecksumType(HFile.DEFAULT_CHECKSUM_TYPE)
+.build();
 HFileBlock.Writer hbw = new HFileBlock.Writer(null, meta);
 Map<BlockType, Long> prevOffsetByType = new HashMap<BlockType, Long>();
 long totalSize = 0;
@@ -839,14 +846,13 @@ public class TestHFileBlock {
 for (int size : new int[] { 100, 256, 12345 }) {
 byte[] byteArr = new byte[HConstants.HFILEBLOCK_HEADER_SIZE + size];
 ByteBuffer buf = ByteBuffer.wrap(byteArr, 0, size);
-HFileContext meta = new HFileContext();
-meta.setIncludesMvcc(includesMemstoreTS);
-meta.setIncludesTags(includesTag);
-meta.setUsesHBaseChecksum(false);
-meta.setCompressAlgo(Algorithm.NONE);
-meta.setBytesPerChecksum(HFile.DEFAULT_BYTES_PER_CHECKSUM);
-meta.setChecksumType(ChecksumType.NULL);
-meta.setBytesPerChecksum(0);
+HFileContext meta = new HFileContextBuilder()
+.withIncludesMvcc(includesMemstoreTS)
+.withIncludesTags(includesTag)
+.withHBaseCheckSum(false)
+.withCompressionAlgo(Algorithm.NONE)
+.withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM)
+.withChecksumType(ChecksumType.NULL).build();
 HFileBlock block = new HFileBlock(BlockType.DATA, size, size, -1, buf,
 HFileBlock.FILL_HEADER, -1,
 0, meta);
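TestHFileBlock pairs one context for the block writer with a second one for the reader. A condensed, hypothetical round trip built only from calls that appear in the hunks above (algo, includesMemstoreTS, includesTag, is and totalSize are the test's own variables):

    HFileContext writerCtx = new HFileContextBuilder()
        .withCompressionAlgo(algo)
        .withIncludesMvcc(includesMemstoreTS)
        .withIncludesTags(includesTag)
        .withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM)
        .withChecksumType(HFile.DEFAULT_CHECKSUM_TYPE)
        .build();
    HFileBlock.Writer writer = new HFileBlock.Writer(null, writerCtx);  // null: no data block encoder

    HFileContext readerCtx = new HFileContextBuilder()
        .withHBaseCheckSum(true)               // blocks on disk carry HBase checksums
        .withIncludesMvcc(includesMemstoreTS)
        .withIncludesTags(includesTag)
        .withCompressionAlgo(algo)
        .build();
    HFileBlock.FSReader reader = new HFileBlock.FSReaderV2(is, totalSize, readerCtx);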
@@ -42,13 +42,12 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.SmallTests;
 import org.apache.hadoop.hbase.fs.HFileSystem;
+import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
 import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext;
 import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext;
-import org.apache.hadoop.hbase.io.hfile.HFileContext;
 import org.apache.hadoop.hbase.io.hfile.HFileBlock.BlockWritable;
-import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ChecksumType;
 import org.apache.hadoop.io.compress.Compressor;
@@ -198,11 +197,12 @@ public class TestHFileBlockCompatibility {
 os.close();
 
 FSDataInputStream is = fs.open(path);
-HFileContext meta = new HFileContext();
-meta.setUsesHBaseChecksum(false);
-meta.setIncludesMvcc(includesMemstoreTS);
-meta.setIncludesTags(includesTag);
-meta.setCompressAlgo(algo);
+HFileContext meta = new HFileContextBuilder()
+.withHBaseCheckSum(false)
+.withIncludesMvcc(includesMemstoreTS)
+.withIncludesTags(includesTag)
+.withCompressionAlgo(algo)
+.build();
 HFileBlock.FSReader hbr = new HFileBlock.FSReaderV2(new FSDataInputStreamWrapper(is),
 totalSize, fs, path, meta);
 HFileBlock b = hbr.readBlockData(0, -1, -1, pread);
@@ -280,11 +280,12 @@ public class TestHFileBlockCompatibility {
 os.close();
 
 FSDataInputStream is = fs.open(path);
-HFileContext meta = new HFileContext();
-meta.setUsesHBaseChecksum(false);
-meta.setIncludesMvcc(includesMemstoreTS);
-meta.setIncludesTags(includesTag);
-meta.setCompressAlgo(algo);
+HFileContext meta = new HFileContextBuilder()
+.withHBaseCheckSum(false)
+.withIncludesMvcc(includesMemstoreTS)
+.withIncludesTags(includesTag)
+.withCompressionAlgo(algo)
+.build();
 HFileBlock.FSReaderV2 hbr = new HFileBlock.FSReaderV2(new FSDataInputStreamWrapper(is),
 totalSize, fs, path, meta);
 hbr.setDataBlockEncoder(dataBlockEncoder);
@@ -420,12 +421,12 @@ public class TestHFileBlockCompatibility {
 this.dataBlockEncoder = dataBlockEncoder != null
 ? dataBlockEncoder : NoOpDataBlockEncoder.INSTANCE;
 
-meta = new HFileContext();
-meta.setUsesHBaseChecksum(false);
-meta.setIncludesMvcc(includesMemstoreTS);
-meta.setIncludesTags(includesTag);
-meta.setCompressAlgo(compressionAlgorithm);
+meta = new HFileContextBuilder()
+.withHBaseCheckSum(false)
+.withIncludesMvcc(includesMemstoreTS)
+.withIncludesTags(includesTag)
+.withCompressionAlgo(compressionAlgorithm)
+.build();
 defaultBlockEncodingCtx = new HFileBlockDefaultEncodingContext(null, DUMMY_HEADER, meta);
 dataBlockEncodingCtx =
 this.dataBlockEncoder.newOnDiskDataBlockEncodingContext(
@@ -730,9 +731,11 @@ public class TestHFileBlockCompatibility {
 * Creates a new HFileBlock.
 */
 public HFileBlock getBlockForCaching() {
-meta.setUsesHBaseChecksum(false);
-meta.setChecksumType(ChecksumType.NULL);
-meta.setBytesPerChecksum(0);
+HFileContext meta = new HFileContextBuilder()
+.withHBaseCheckSum(false)
+.withChecksumType(ChecksumType.NULL)
+.withBytesPerCheckSum(0)
+.build();
 return new HFileBlock(blockType, getOnDiskSizeWithoutHeader(),
 getUncompressedSizeWithoutHeader(), prevOffset,
 getUncompressedBufferWithHeader(), DONT_FILL_HEADER, startOffset,
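In getBlockForCaching() the writer now builds a separate context instead of mutating its own, so handing a block to the cache can no longer flip the writer's checksum settings. The cache-oriented values from the hunk above, restated as a sketch:

    // Blocks placed in the block cache carry no HBase checksums.
    HFileContext cacheCtx = new HFileContextBuilder()
        .withHBaseCheckSum(false)
        .withChecksumType(ChecksumType.NULL)  // no checksum algorithm
        .withBytesPerCheckSum(0)              // no checksum chunking
        .build();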
@@ -188,11 +188,12 @@ public class TestHFileBlockIndex {
 LOG.info("Size of " + path + ": " + fileSize);
 
 FSDataInputStream istream = fs.open(path);
-HFileContext meta = new HFileContext();
-meta.setUsesHBaseChecksum(true);
-meta.setIncludesMvcc(includesMemstoreTS);
-meta.setIncludesTags(useTags);
-meta.setCompressAlgo(compr);
+HFileContext meta = new HFileContextBuilder()
+.withHBaseCheckSum(true)
+.withIncludesMvcc(includesMemstoreTS)
+.withIncludesTags(useTags)
+.withCompressionAlgo(compr)
+.build();
 HFileBlock.FSReader blockReader = new HFileBlock.FSReaderV2(istream, fs.getFileStatus(path)
 .getLen(), meta);
 
@@ -241,13 +242,14 @@ public class TestHFileBlockIndex {
 
 private void writeWholeIndex(boolean useTags) throws IOException {
 assertEquals(0, keys.size());
-HFileContext meta = new HFileContext();
-meta.setUsesHBaseChecksum(true);
-meta.setIncludesMvcc(includesMemstoreTS);
-meta.setIncludesTags(useTags);
-meta.setCompressAlgo(compr);
-meta.setChecksumType(HFile.DEFAULT_CHECKSUM_TYPE);
-meta.setBytesPerChecksum(HFile.DEFAULT_BYTES_PER_CHECKSUM);
+HFileContext meta = new HFileContextBuilder()
+.withHBaseCheckSum(true)
+.withIncludesMvcc(includesMemstoreTS)
+.withIncludesTags(useTags)
+.withCompressionAlgo(compr)
+.withChecksumType(HFile.DEFAULT_CHECKSUM_TYPE)
+.withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM)
+.build();
 HFileBlock.Writer hbw = new HFileBlock.Writer(null,
 meta);
 FSDataOutputStream outputStream = fs.create(path);
@@ -516,9 +518,10 @@ public class TestHFileBlockIndex {
 
 // Write the HFile
 {
-HFileContext meta = new HFileContext();
-meta.setBlocksize(SMALL_BLOCK_SIZE);
-meta.setCompressAlgo(compr);
+HFileContext meta = new HFileContextBuilder()
+.withBlockSize(SMALL_BLOCK_SIZE)
+.withCompressionAlgo(compr)
+.build();
 HFile.Writer writer =
 HFile.getWriterFactory(conf, cacheConf)
 .withPath(fs, hfilePath)
@@ -30,7 +30,6 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.SmallTests;
 import org.apache.hadoop.hbase.io.HeapSize;
-import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext;
@@ -127,13 +126,12 @@ public class TestHFileDataBlockEncoder {
 buf.position(headerSize);
 keyValues.rewind();
 buf.put(keyValues);
-HFileContext meta = new HFileContext();
-meta.setUsesHBaseChecksum(false);
-meta.setIncludesMvcc(includesMemstoreTS);
-meta.setIncludesTags(useTags);
-meta.setCompressAlgo(Compression.Algorithm.NONE);
-meta.setBlocksize(0);
-meta.setChecksumType(ChecksumType.NULL);
+HFileContext meta = new HFileContextBuilder().withHBaseCheckSum(false)
+.withIncludesMvcc(includesMemstoreTS)
+.withIncludesTags(useTags)
+.withBlockSize(0)
+.withChecksumType(ChecksumType.NULL)
+.build();
 HFileBlock block = new HFileBlock(BlockType.DATA, size, size, -1, buf,
 HFileBlock.FILL_HEADER, 0,
 0, meta);
@@ -203,13 +201,14 @@ public class TestHFileDataBlockEncoder {
 buf.position(HConstants.HFILEBLOCK_HEADER_SIZE);
 keyValues.rewind();
 buf.put(keyValues);
-HFileContext meta = new HFileContext();
-meta.setIncludesMvcc(includesMemstoreTS);
-meta.setIncludesTags(useTag);
-meta.setUsesHBaseChecksum(true);
-meta.setCompressAlgo(Algorithm.NONE);
-meta.setBlocksize(0);
-meta.setChecksumType(ChecksumType.NULL);
+HFileContext meta = new HFileContextBuilder()
+.withIncludesMvcc(includesMemstoreTS)
+.withIncludesTags(useTag)
+.withHBaseCheckSum(true)
+.withCompressionAlgo(Algorithm.NONE)
+.withBlockSize(0)
+.withChecksumType(ChecksumType.NULL)
+.build();
 HFileBlock b = new HFileBlock(BlockType.DATA, size, size, -1, buf,
 HFileBlock.FILL_HEADER, 0,
 0, meta);
@@ -23,10 +23,10 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.junit.experimental.categories.Category;
 import org.apache.hadoop.hbase.SmallTests;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.Test;
+import org.junit.experimental.categories.Category;
 
 /**
  * Test a case when an inline index chunk is converted to a root one. This reproduces the bug in
@@ -52,8 +52,7 @@ public class TestHFileInlineToRootChunkConversion {
 FileSystem fs = FileSystem.get(conf);
 CacheConfig cacheConf = new CacheConfig(conf);
 conf.setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY, maxChunkSize);
-HFileContext context = new HFileContext();
-context.setBlocksize(16);
+HFileContext context = new HFileContextBuilder().withBlockSize(16).build();
 HFileWriterV2 hfw =
 (HFileWriterV2) new HFileWriterV2.WriterFactoryV2(conf, cacheConf)
 .withFileContext(context)
@@ -161,9 +161,9 @@ public class TestHFilePerformance extends TestCase {
 FSDataOutputStream fout = createFSOutput(path);
 
 if ("HFile".equals(fileType)){
-HFileContext meta = new HFileContext();
-meta.setCompressAlgo(AbstractHFileWriter.compressionByName(codecName));
-meta.setBlocksize(minBlockSize);
+HFileContext meta = new HFileContextBuilder()
+.withCompressionAlgo(AbstractHFileWriter.compressionByName(codecName))
+.withBlockSize(minBlockSize).build();
 System.out.println("HFile write method: ");
 HFile.Writer writer = HFile.getWriterFactoryNoCache(conf)
 .withOutputStream(fout)
@@ -127,9 +127,10 @@ public class TestHFileSeek extends TestCase {
 long totalBytes = 0;
 FSDataOutputStream fout = createFSOutput(path, fs);
 try {
-HFileContext context = new HFileContext();
-context.setBlocksize(options.minBlockSize);
-context.setCompressAlgo(AbstractHFileWriter.compressionByName(options.compress));
+HFileContext context = new HFileContextBuilder()
+.withBlockSize(options.minBlockSize)
+.withCompressionAlgo(AbstractHFileWriter.compressionByName(options.compress))
+.build();
 Writer writer = HFile.getWriterFactoryNoCache(conf)
 .withOutputStream(fout)
 .withFileContext(context)
@@ -43,7 +43,6 @@ import org.apache.hadoop.hbase.KeyValue.KVComparator;
 import org.apache.hadoop.hbase.SmallTests;
 import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
-import org.apache.hadoop.hbase.io.hfile.HFileContext;
 import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Writables;
@@ -94,9 +93,10 @@ public class TestHFileWriterV2 {
 private void writeDataAndReadFromHFile(Path hfilePath,
 Algorithm compressAlgo, int entryCount, boolean findMidKey) throws IOException {
 
-HFileContext context = new HFileContext();
-context.setBlocksize(4096);
-context.setCompressAlgo(compressAlgo);
+HFileContext context = new HFileContextBuilder()
+.withBlockSize(4096)
+.withCompressionAlgo(compressAlgo)
+.build();
 HFileWriterV2 writer = (HFileWriterV2)
 new HFileWriterV2.WriterFactoryV2(conf, new CacheConfig(conf))
 .withPath(fs, hfilePath)
@@ -137,11 +137,12 @@ public class TestHFileWriterV2 {
 assertEquals(2, trailer.getMajorVersion());
 assertEquals(entryCount, trailer.getEntryCount());
 
-HFileContext meta = new HFileContext();
-meta.setUsesHBaseChecksum(true);
-meta.setIncludesMvcc(false);
-meta.setIncludesTags(false);
-meta.setCompressAlgo(compressAlgo);
+HFileContext meta = new HFileContextBuilder()
+.withHBaseCheckSum(true)
+.withIncludesMvcc(false)
+.withIncludesTags(false)
+.withCompressionAlgo(compressAlgo)
+.build();
 
 HFileBlock.FSReader blockReader = new HFileBlock.FSReaderV2(fsdis, fileSize, meta);
 // Comparator class name is stored in the trailer in version 2.
@@ -115,9 +115,9 @@ public class TestHFileWriterV3 {
 
 private void writeDataAndReadFromHFile(Path hfilePath,
 Algorithm compressAlgo, int entryCount, boolean findMidKey, boolean useTags) throws IOException {
-HFileContext context = new HFileContext();
-context.setBlocksize(4096);
-context.setCompressAlgo(compressAlgo);
+HFileContext context = new HFileContextBuilder()
+.withBlockSize(4096)
+.withCompressionAlgo(compressAlgo).build();
 HFileWriterV3 writer = (HFileWriterV3)
 new HFileWriterV3.WriterFactoryV3(conf, new CacheConfig(conf))
 .withPath(fs, hfilePath)
@@ -168,11 +168,11 @@ public class TestHFileWriterV3 {
 
 assertEquals(3, trailer.getMajorVersion());
 assertEquals(entryCount, trailer.getEntryCount());
-HFileContext meta = new HFileContext();
-meta.setCompressAlgo(compressAlgo);
-meta.setIncludesMvcc(false);
-meta.setIncludesTags(useTags);
-meta.setUsesHBaseChecksum(true);
+HFileContext meta = new HFileContextBuilder()
+.withCompressionAlgo(compressAlgo)
+.withIncludesMvcc(false)
+.withIncludesTags(useTags)
+.withHBaseCheckSum(true).build();
 HFileBlock.FSReader blockReader =
 new HFileBlock.FSReaderV2(fsdis, fileSize, meta);
 // Comparator class name is stored in the trailer in version 2.
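The writer-version tests also show where the finished context goes: it is handed to the writer factory once, rather than being configured field by field afterwards. A hedged sketch (conf, cacheConf, fs, compressAlgo and hfilePath stand in for the test fixtures):

    HFileContext context = new HFileContextBuilder()
        .withBlockSize(4096)
        .withCompressionAlgo(compressAlgo)
        .build();
    HFile.Writer writer = HFile.getWriterFactory(conf, cacheConf)
        .withPath(fs, hfilePath)
        .withFileContext(context)   // the immutable context travels with the writer
        .create();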
@@ -57,8 +57,7 @@ public class TestReseekTo {
 TEST_UTIL.getConfiguration().setInt("hfile.format.version", 3);
 }
 CacheConfig cacheConf = new CacheConfig(TEST_UTIL.getConfiguration());
-HFileContext context = new HFileContext();
-context.setBlocksize(4000);
+HFileContext context = new HFileContextBuilder().withBlockSize(4000).build();
 HFile.Writer writer = HFile.getWriterFactory(
 TEST_UTIL.getConfiguration(), cacheConf)
 .withOutputStream(fout)
@@ -22,9 +22,12 @@ import java.io.IOException;
 
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.HBaseTestCase;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.SmallTests;
+import org.apache.hadoop.hbase.Tag;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.io.RawComparator;
 import org.junit.experimental.categories.Category;
 
 /**
@@ -73,8 +76,7 @@ public class TestSeekTo extends HBaseTestCase {
 }
 FSDataOutputStream fout = this.fs.create(ncTFile);
 int blocksize = toKV("a", tagUsage).getLength() * 3;
-HFileContext context = new HFileContext();
-context.setBlocksize(blocksize);
+HFileContext context = new HFileContextBuilder().withBlockSize(blocksize).build();
 HFile.Writer writer = HFile.getWriterFactoryNoCache(conf).withOutputStream(fout)
 .withFileContext(context)
 // NOTE: This test is dependent on this deprecated nonstandard
@@ -18,10 +18,22 @@
  */
 package org.apache.hadoop.hbase.mapreduce;
 
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.TreeMap;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.LargeTests;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.io.compress.Compression;
@@ -29,6 +41,7 @@ import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
+import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
 import org.apache.hadoop.hbase.regionserver.BloomType;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
@@ -38,11 +51,6 @@ import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-import java.io.IOException;
-import java.util.TreeMap;
-
-import static org.junit.Assert.*;
-
 /**
  * Test cases for the "load" half of the HFileOutputFormat bulk load
  * functionality. These tests run faster than the full MR cluster
@@ -262,9 +270,10 @@ public class TestLoadIncrementalHFiles {
 byte[] family, byte[] qualifier,
 byte[] startKey, byte[] endKey, int numRows) throws IOException
 {
-HFileContext meta = new HFileContext();
-meta.setBlocksize(BLOCKSIZE);
-meta.setCompressAlgo(COMPRESSION);
+HFileContext meta = new HFileContextBuilder()
+.withBlockSize(BLOCKSIZE)
+.withCompressionAlgo(COMPRESSION)
+.build();
 HFile.Writer writer = HFile.getWriterFactory(configuration, new CacheConfig(configuration))
 .withPath(fs, path)
 .withFileContext(meta)
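The bulk-load tests create standalone HFiles the same way and only care about block size and compression. A sketch mirroring the HFile-creation helpers in these hunks (BLOCKSIZE and COMPRESSION are the tests' own constants; fs, path and configuration are assumed to be in scope):

    HFileContext meta = new HFileContextBuilder()
        .withBlockSize(BLOCKSIZE)          // test-defined block size constant
        .withCompressionAlgo(COMPRESSION)  // test-defined compression constant
        .build();
    HFile.Writer writer = HFile.getWriterFactory(configuration, new CacheConfig(configuration))
        .withPath(fs, path)
        .withFileContext(meta)
        .create();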
@@ -38,9 +38,9 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
-import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileBlockIndex;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
+import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
 import org.apache.hadoop.hbase.util.BloomFilterFactory;
 import org.apache.hadoop.io.BytesWritable;
 
@@ -183,9 +183,8 @@ public class CreateRandomStoreFile {
 Integer.valueOf(cmdLine.getOptionValue(INDEX_BLOCK_SIZE_OPTION)));
 }
 
-HFileContext meta = new HFileContext();
-meta.setCompressAlgo(compr);
-meta.setBlocksize(blockSize);
+HFileContext meta = new HFileContextBuilder().withCompressionAlgo(compr)
+.withBlockSize(blockSize).build();
 StoreFile.Writer sfw = new StoreFile.WriterBuilder(conf,
 new CacheConfig(conf), fs)
 .withOutputDir(outputDir)
@@ -45,9 +45,10 @@ import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoder;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.io.encoding.EncodedDataBlock;
-import org.apache.hadoop.hbase.io.hfile.HFileContext;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFileBlock;
+import org.apache.hadoop.hbase.io.hfile.HFileContext;
+import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
 import org.apache.hadoop.hbase.io.hfile.HFileReaderV2;
 import org.apache.hadoop.hbase.io.hfile.NoOpDataBlockEncoder;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -214,10 +215,10 @@ public class DataBlockEncodingTool {
 continue;
 }
 DataBlockEncoder d = encoding.getEncoder();
-HFileContext meta = new HFileContext();
-meta.setCompressAlgo(Compression.Algorithm.NONE);
-meta.setIncludesMvcc(includesMemstoreTS);
-meta.setIncludesTags(useTag);
+HFileContext meta = new HFileContextBuilder()
+.withCompressionAlgo(Compression.Algorithm.NONE)
+.withIncludesMvcc(includesMemstoreTS)
+.withIncludesTags(useTag).build();
 codecs.add(new EncodedDataBlock(d, encoding, rawKVs, meta ));
 }
 }
@@ -36,13 +36,16 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.MediumTests;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.hfile.BlockCache;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
-import org.apache.hadoop.hbase.io.hfile.HFileContext;
-import org.apache.hadoop.hbase.io.hfile.NoOpDataBlockEncoder;
 import org.apache.hadoop.hbase.io.hfile.HFile;
+import org.apache.hadoop.hbase.io.hfile.HFileContext;
+import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
+import org.apache.hadoop.hbase.io.hfile.NoOpDataBlockEncoder;
 import org.apache.hadoop.hbase.io.hfile.TestHFileWriterV2;
 import org.apache.hadoop.hbase.util.BloomFilterFactory;
 import org.apache.hadoop.hbase.util.ByteBloomFilter;
@@ -292,8 +295,7 @@ public class TestCompoundBloomFilter {
 BLOOM_BLOCK_SIZES[t]);
 conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, true);
 cacheConf = new CacheConfig(conf);
-HFileContext meta = new HFileContext();
-meta.setBlocksize(BLOCK_SIZES[t]);
+HFileContext meta = new HFileContextBuilder().withBlockSize(BLOCK_SIZES[t]).build();
 StoreFile.Writer w = new StoreFile.WriterBuilder(conf, cacheConf, fs)
 .withOutputDir(TEST_UTIL.getDataTestDir())
 .withBloomType(bt)
@@ -37,12 +37,19 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FilterFileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PositionedReadable;
-import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.MediumTests;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.fs.HFileSystem;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
+import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
 import org.apache.hadoop.hbase.io.hfile.NoOpDataBlockEncoder;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -73,8 +80,7 @@ public class TestFSErrorsExposed {
 FaultyFileSystem faultyfs = new FaultyFileSystem(hfs.getBackingFs());
 FileSystem fs = new HFileSystem(faultyfs);
 CacheConfig cacheConf = new CacheConfig(util.getConfiguration());
-HFileContext meta = new HFileContext();
-meta.setBlocksize(2*1024);
+HFileContext meta = new HFileContextBuilder().withBlockSize(2 * 1024).build();
 StoreFile.Writer writer = new StoreFile.WriterBuilder(
 util.getConfiguration(), cacheConf, hfs)
 .withOutputDir(hfilePath)
@@ -125,8 +131,7 @@ public class TestFSErrorsExposed {
 FaultyFileSystem faultyfs = new FaultyFileSystem(hfs.getBackingFs());
 HFileSystem fs = new HFileSystem(faultyfs);
 CacheConfig cacheConf = new CacheConfig(util.getConfiguration());
-HFileContext meta = new HFileContext();
-meta.setBlocksize(2 * 1024);
+HFileContext meta = new HFileContextBuilder().withBlockSize(2 * 1024).build();
 StoreFile.Writer writer = new StoreFile.WriterBuilder(
 util.getConfiguration(), cacheConf, hfs)
 .withOutputDir(hfilePath)
@@ -27,9 +27,16 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.LargeTests;
 import org.apache.hadoop.hbase.MultithreadedTestUtil.RepeatingTestThread;
 import org.apache.hadoop.hbase.MultithreadedTestUtil.TestContext;
+import org.apache.hadoop.hbase.TableExistsException;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.HConnection;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.RegionServerCallable;
@@ -38,12 +45,12 @@ import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.RpcRetryingCaller;
 import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory;
 import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.TableExistsException;
 import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
+import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
 import org.apache.hadoop.hbase.protobuf.RequestConverter;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest;
@@ -51,9 +58,9 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequ
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
 import org.junit.Test;
+import org.junit.experimental.categories.Category;
 
 import com.google.common.collect.Lists;
-import org.junit.experimental.categories.Category;
 
 /**
  * Tests bulk loading of HFiles and shows the atomicity or lack of atomicity of
@@ -89,9 +96,9 @@ public class TestHRegionServerBulkLoad {
    */
   public static void createHFile(FileSystem fs, Path path, byte[] family,
       byte[] qualifier, byte[] value, int numRows) throws IOException {
-    HFileContext context = new HFileContext();
-    context.setBlocksize(BLOCKSIZE);
-    context.setCompressAlgo(COMPRESSION);
+    HFileContext context = new HFileContextBuilder().withBlockSize(BLOCKSIZE)
+        .withCompressionAlgo(COMPRESSION)
+        .build();
     HFile.Writer writer = HFile
         .getWriterFactory(conf, new CacheConfig(conf))
         .withPath(fs, path)

@@ -60,6 +60,7 @@ import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
+import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
@@ -321,8 +322,7 @@ public class TestStore extends TestCase {
     long seqid = f.getMaxSequenceId();
     Configuration c = HBaseConfiguration.create();
     FileSystem fs = FileSystem.get(c);
-    HFileContext meta = new HFileContext();
-    meta.setBlocksize(StoreFile.DEFAULT_BLOCKSIZE_SMALL);
+    HFileContext meta = new HFileContextBuilder().withBlockSize(StoreFile.DEFAULT_BLOCKSIZE_SMALL).build();
     StoreFile.Writer w = new StoreFile.WriterBuilder(c, new CacheConfig(c),
         fs)
             .withOutputDir(storedir)

@@ -47,6 +47,7 @@ import org.apache.hadoop.hbase.io.hfile.BlockCache;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.CacheStats;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
+import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
 import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
 import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
@@ -96,8 +97,7 @@ public class TestStoreFile extends HBaseTestCase {
     HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(
       conf, fs, new Path(this.testDir, hri.getTable().getNameAsString()), hri);
 
-    HFileContext meta = new HFileContext();
-    meta.setBlocksize(2 * 1024);
+    HFileContext meta = new HFileContextBuilder().withBlockSize(2*1024).build();
     StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs)
             .withFilePath(regionFs.createTempName())
             .withFileContext(meta)
@@ -148,8 +148,7 @@ public class TestStoreFile extends HBaseTestCase {
     HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(
       conf, fs, new Path(this.testDir, hri.getTable().getNameAsString()), hri);
 
-    HFileContext meta = new HFileContext();
-    meta.setBlocksize(8 * 1024);
+    HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
     // Make a store file and write data to it.
     StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs)
             .withFilePath(regionFs.createTempName())
@@ -194,8 +193,7 @@ public class TestStoreFile extends HBaseTestCase {
     FSUtils.setRootDir(testConf, this.testDir);
     HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(
       testConf, fs, FSUtils.getTableDir(this.testDir, hri.getTable()), hri);
-    HFileContext meta = new HFileContext();
-    meta.setBlocksize(8 * 1024);
+    HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
 
     // Make a store file and write data to it.
     StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs)
@@ -240,8 +238,7 @@ public class TestStoreFile extends HBaseTestCase {
     HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(
       testConf, fs, FSUtils.getTableDir(this.testDir, hri.getTable()), hri);
 
-    HFileContext meta = new HFileContext();
-    meta.setBlocksize(8 * 1024);
+    HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
     // Make a store file and write data to it. <root>/<tablename>/<rgn>/<cf>/<file>
     StoreFile.Writer writer = new StoreFile.WriterBuilder(testConf, cacheConf, this.fs)
             .withFilePath(regionFs.createTempName())
@@ -503,10 +500,9 @@ public class TestStoreFile extends HBaseTestCase {
 
     // write the file
     Path f = new Path(ROOT_DIR, getName());
-    HFileContext meta = new HFileContext();
-    meta.setBlocksize(StoreFile.DEFAULT_BLOCKSIZE_SMALL);
-    meta.setChecksumType(CKTYPE);
-    meta.setBytesPerChecksum(CKBYTES);
+    HFileContext meta = new HFileContextBuilder().withBlockSize(StoreFile.DEFAULT_BLOCKSIZE_SMALL)
+        .withChecksumType(CKTYPE)
+        .withBytesPerCheckSum(CKBYTES).build();
     // Make a store file and write data to it.
     StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs)
             .withFilePath(f)
@@ -526,10 +522,10 @@
     // write the file
     Path f = new Path(ROOT_DIR, getName());
 
-    HFileContext meta = new HFileContext();
-    meta.setBlocksize(StoreFile.DEFAULT_BLOCKSIZE_SMALL);
-    meta.setChecksumType(CKTYPE);
-    meta.setBytesPerChecksum(CKBYTES);
+    HFileContext meta = new HFileContextBuilder()
+        .withBlockSize(StoreFile.DEFAULT_BLOCKSIZE_SMALL)
+        .withChecksumType(CKTYPE)
+        .withBytesPerCheckSum(CKBYTES).build();
     // Make a store file and write data to it.
     StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs)
             .withFilePath(f)
@@ -582,8 +578,7 @@ public class TestStoreFile extends HBaseTestCase {
   public void testReseek() throws Exception {
     // write the file
     Path f = new Path(ROOT_DIR, getName());
-    HFileContext meta = new HFileContext();
-    meta.setBlocksize(8 * 1024);
+    HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
     // Make a store file and write data to it.
     StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs)
             .withFilePath(f)
@@ -626,10 +621,9 @@
     for (int x : new int[]{0,1}) {
       // write the file
       Path f = new Path(ROOT_DIR, getName() + x);
-      HFileContext meta = new HFileContext();
-      meta.setBlocksize(StoreFile.DEFAULT_BLOCKSIZE_SMALL);
-      meta.setChecksumType(CKTYPE);
-      meta.setBytesPerChecksum(CKBYTES);
+      HFileContext meta = new HFileContextBuilder().withBlockSize(StoreFile.DEFAULT_BLOCKSIZE_SMALL)
+          .withChecksumType(CKTYPE)
+          .withBytesPerCheckSum(CKBYTES).build();
       // Make a store file and write data to it.
       StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs)
               .withFilePath(f)
@@ -782,8 +776,7 @@
     // Make up a directory hierarchy that has a regiondir ("7e0102") and familyname.
     Path storedir = new Path(new Path(this.testDir, "7e0102"), "familyname");
     Path dir = new Path(storedir, "1234567890");
-    HFileContext meta = new HFileContext();
-    meta.setBlocksize(8 * 1024);
+    HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
     // Make a store file and write data to it.
     StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs)
             .withOutputDir(dir)
@@ -969,10 +962,10 @@
       totalSize += kv.getLength() + 1;
     }
     int blockSize = totalSize / numBlocks;
-    HFileContext meta = new HFileContext();
-    meta.setBlocksize(blockSize);
-    meta.setChecksumType(CKTYPE);
-    meta.setBytesPerChecksum(CKBYTES);
+    HFileContext meta = new HFileContextBuilder().withBlockSize(blockSize)
+        .withChecksumType(CKTYPE)
+        .withBytesPerCheckSum(CKBYTES)
+        .build();
     // Make a store file and write data to it.
     StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs)
             .withFilePath(path)
@@ -1005,12 +998,12 @@
             dataBlockEncoderAlgo,
             dataBlockEncoderAlgo);
     cacheConf = new CacheConfig(conf);
-    HFileContext meta = new HFileContext();
-    meta.setBlocksize(HConstants.DEFAULT_BLOCKSIZE);
-    meta.setChecksumType(CKTYPE);
-    meta.setBytesPerChecksum(CKBYTES);
-    meta.setEncodingOnDisk(dataBlockEncoderAlgo);
-    meta.setEncodingInCache(dataBlockEncoderAlgo);
+    HFileContext meta = new HFileContextBuilder().withBlockSize(StoreFile.DEFAULT_BLOCKSIZE_SMALL)
+        .withChecksumType(CKTYPE)
+        .withBytesPerCheckSum(CKBYTES)
+        .withDataBlockEncodingInCache(dataBlockEncoderAlgo)
+        .withDataBlockEncodingOnDisk(dataBlockEncoderAlgo)
+        .build();
     // Make a store file and write data to it.
     StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs)
             .withFilePath(path)

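In the fuller chains above, the builder gathers what was previously spread over five setters into one expression, and the resulting context is then handed to the store file writer through withFileContext, as the hunks show. A hedged sketch of that shape, assuming block size, checksum and encoding values are supplied by the caller; the holder class, helper name, and the import location of ChecksumType are assumptions for illustration, not part of the patch:

import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.HFileContext;
import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
import org.apache.hadoop.hbase.util.ChecksumType;  // import path assumed

class EncodedContextSketch {  // hypothetical holder class, not part of the patch
  // Builds an HFileContext carrying block size, checksum and data block encoding settings
  // in a single builder chain, mirroring the converted TestStoreFile code above.
  static HFileContext encodedContext(int blockSize, ChecksumType ckType, int ckBytes,
      DataBlockEncoding encoding) {
    return new HFileContextBuilder()
        .withBlockSize(blockSize)
        .withChecksumType(ckType)
        .withBytesPerCheckSum(ckBytes)
        .withDataBlockEncodingOnDisk(encoding)
        .withDataBlockEncodingInCache(encoding)
        .build();
  }
}

The intent of the builder change is that a context is fully configured at construction time rather than mutated afterwards, which is why each converted test passes the finished context straight into the writer builder.
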
@@ -60,6 +60,7 @@ import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
+import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine;
@@ -327,7 +328,7 @@ public class TestWALReplay {
     HLog wal = createWAL(this.conf);
     HRegion region = HRegion.openHRegion(hri, htd, wal, this.conf);
     Path f = new Path(basedir, "hfile");
-    HFileContext context = new HFileContext();
+    HFileContext context = new HFileContextBuilder().build();
     HFile.Writer writer =
       HFile.getWriterFactoryNoCache(conf).withPath(fs, f)
           .withFileContext(context).create();

@@ -68,6 +68,7 @@ import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessorEnvironment;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
+import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
 import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
@@ -843,7 +844,7 @@ public class TestAccessController extends SecureTestUtil {
     HFile.Writer writer = null;
     long now = System.currentTimeMillis();
     try {
-      HFileContext context = new HFileContext();
+      HFileContext context = new HFileContextBuilder().build();
       writer = HFile.getWriterFactory(conf, new CacheConfig(conf))
           .withPath(fs, path)
           .withFileContext(context)