HBASE-9546 HFileContext should adopt Builder pattern (Ram)

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1530567 13f79535-47bb-0310-9956-ffa450edef68

parent 3993047970
commit 7dd240c8ad
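The change below moves HFileContext construction from mutable setters to a fluent HFileContextBuilder. A minimal before/after sketch of the API change (method names are taken from the diff below; the block size value is illustrative):

    // Before this commit: mutate a freshly constructed context through setters.
    HFileContext meta = new HFileContext();
    meta.setCompressAlgo(Compression.Algorithm.NONE);
    meta.setBlocksize(65536);

    // After this commit: assemble the context through the builder instead.
    HFileContext meta = new HFileContextBuilder()
        .withCompressionAlgo(Compression.Algorithm.NONE)
        .withBlockSize(65536)
        .build();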
@@ -59,27 +59,52 @@ public class HFileContext implements HeapSize, Cloneable {

   //Empty constructor. Go with setters
   public HFileContext() {
   }
+
+  /**
+   * Copy constructor
+   * @param context
+   */
+  public HFileContext(HFileContext context) {
+    this.usesHBaseChecksum = context.usesHBaseChecksum;
+    this.includesMvcc = context.includesMvcc;
+    this.includesTags = context.includesTags;
+    this.compressAlgo = context.compressAlgo;
+    this.compressTags = context.compressTags;
+    this.checksumType = context.checksumType;
+    this.bytesPerChecksum = context.bytesPerChecksum;
+    this.blocksize = context.blocksize;
+    this.encodingOnDisk = context.encodingOnDisk;
+    this.encodingInCache = context.encodingInCache;
+  }
+
+  public HFileContext(boolean useHBaseChecksum, boolean includesMvcc, boolean includesTags,
+      Algorithm compressAlgo, boolean compressTags, ChecksumType checksumType,
+      int bytesPerChecksum, int blockSize, DataBlockEncoding encodingOnDisk,
+      DataBlockEncoding encodingInCache) {
+    this.usesHBaseChecksum = useHBaseChecksum;
+    this.includesMvcc = includesMvcc;
+    this.includesTags = includesTags;
+    this.compressAlgo = compressAlgo;
+    this.compressTags = compressTags;
+    this.checksumType = checksumType;
+    this.bytesPerChecksum = bytesPerChecksum;
+    this.blocksize = blockSize;
+    this.encodingOnDisk = encodingOnDisk;
+    this.encodingInCache = encodingInCache;
+  }

   public Algorithm getCompression() {
     return compressAlgo;
   }

-  public void setCompressAlgo(Algorithm compressAlgo) {
-    this.compressAlgo = compressAlgo;
-  }
-
   public boolean shouldUseHBaseChecksum() {
     return usesHBaseChecksum;
   }

-  public void setUsesHBaseChecksum(boolean usesHBaseChecksum) {
-    this.usesHBaseChecksum = usesHBaseChecksum;
-  }
-
   public boolean shouldIncludeMvcc() {
     return includesMvcc;
   }

+  // TODO : This setter should be removed
   public void setIncludesMvcc(boolean includesMvcc) {
     this.includesMvcc = includesMvcc;
   }

@@ -88,6 +113,7 @@ public class HFileContext implements HeapSize, Cloneable {
     return includesTags;
   }

+  // TODO : This setter should be removed?
   public void setIncludesTags(boolean includesTags) {
     this.includesTags = includesTags;
   }

@@ -96,50 +122,26 @@ public class HFileContext implements HeapSize, Cloneable {
     return compressTags;
   }

-  public void setCompressTags(boolean compressTags) {
-    this.compressTags = compressTags;
-  }
-
   public ChecksumType getChecksumType() {
     return checksumType;
   }

-  public void setChecksumType(ChecksumType checksumType) {
-    this.checksumType = checksumType;
-  }
-
   public int getBytesPerChecksum() {
     return bytesPerChecksum;
   }

-  public void setBytesPerChecksum(int bytesPerChecksum) {
-    this.bytesPerChecksum = bytesPerChecksum;
-  }
-
   public int getBlocksize() {
     return blocksize;
   }

-  public void setBlocksize(int blocksize) {
-    this.blocksize = blocksize;
-  }
-
   public DataBlockEncoding getEncodingOnDisk() {
     return encodingOnDisk;
   }

-  public void setEncodingOnDisk(DataBlockEncoding encodingOnDisk) {
-    this.encodingOnDisk = encodingOnDisk;
-  }
-
   public DataBlockEncoding getEncodingInCache() {
     return encodingInCache;
   }

-  public void setEncodingInCache(DataBlockEncoding encodingInCache) {
-    this.encodingInCache = encodingInCache;
-  }
-
   /**
    * HeapSize implementation
    * NOTE : The heapsize should be altered as and when new state variable are added

@@ -171,4 +173,4 @@ public class HFileContext implements HeapSize, Cloneable {
     clonnedCtx.encodingInCache = this.encodingInCache;
     return clonnedCtx;
   }
-}
\ No newline at end of file
+}
@@ -0,0 +1,107 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.io.hfile;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
+import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
+import org.apache.hadoop.hbase.util.ChecksumType;
+
+/**
+ * A builder that helps in building up the HFileContext
+ */
+@InterfaceAudience.Private
+public class HFileContextBuilder {
+
+  public static final int DEFAULT_BYTES_PER_CHECKSUM = 16 * 1024;
+  public static final ChecksumType DEFAULT_CHECKSUM_TYPE = ChecksumType.CRC32;
+
+  /** Whether checksum is enabled or not **/
+  private boolean usesHBaseChecksum = true;
+  /** Whether mvcc is to be included in the Read/Write **/
+  private boolean includesMvcc = true;
+  /** Whether tags are to be included in the Read/Write **/
+  private boolean includesTags;
+  /** Compression algorithm used **/
+  private Algorithm compressAlgo = Algorithm.NONE;
+  /** Whether tags to be compressed or not **/
+  private boolean compressTags;
+  /** the checksum type **/
+  private ChecksumType checksumType = DEFAULT_CHECKSUM_TYPE;
+  /** the number of bytes per checksum value **/
+  private int bytesPerChecksum = DEFAULT_BYTES_PER_CHECKSUM;
+  /** Number of uncompressed bytes we allow per block. */
+  private int blocksize = HConstants.DEFAULT_BLOCKSIZE;
+  private DataBlockEncoding encodingOnDisk = DataBlockEncoding.NONE;
+  private DataBlockEncoding encodingInCache = DataBlockEncoding.NONE;
+
+  public HFileContextBuilder withHBaseCheckSum(boolean useHBaseCheckSum) {
+    this.usesHBaseChecksum = useHBaseCheckSum;
+    return this;
+  }
+
+  public HFileContextBuilder withIncludesMvcc(boolean includesMvcc) {
+    this.includesMvcc = includesMvcc;
+    return this;
+  }
+
+  public HFileContextBuilder withIncludesTags(boolean includesTags) {
+    this.includesTags = includesTags;
+    return this;
+  }
+
+  public HFileContextBuilder withCompressionAlgo(Algorithm compressionAlgo) {
+    this.compressAlgo = compressionAlgo;
+    return this;
+  }
+
+  public HFileContextBuilder withCompressTags(boolean compressTags) {
+    this.compressTags = compressTags;
+    return this;
+  }
+
+  public HFileContextBuilder withChecksumType(ChecksumType checkSumType) {
+    this.checksumType = checkSumType;
+    return this;
+  }
+
+  public HFileContextBuilder withBytesPerCheckSum(int bytesPerChecksum) {
+    this.bytesPerChecksum = bytesPerChecksum;
+    return this;
+  }
+
+  public HFileContextBuilder withBlockSize(int blockSize) {
+    this.blocksize = blockSize;
+    return this;
+  }
+
+  public HFileContextBuilder withDataBlockEncodingOnDisk(DataBlockEncoding encodingOnDisk) {
+    this.encodingOnDisk = encodingOnDisk;
+    return this;
+  }
+
+  public HFileContextBuilder withDataBlockEncodingInCache(DataBlockEncoding encodingInCache) {
+    this.encodingInCache = encodingInCache;
+    return this;
+  }
+
+  public HFileContext build() {
+    return new HFileContext(usesHBaseChecksum, includesMvcc, includesTags, compressAlgo,
+        compressTags, checksumType, bytesPerChecksum, blocksize, encodingOnDisk, encodingInCache);
+  }
+}
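Fields left unset on the builder keep the defaults declared above: HBase checksums and MVCC on, no tags, Algorithm.NONE, CRC32 checksums every 16 KB, HConstants.DEFAULT_BLOCKSIZE, and DataBlockEncoding.NONE. A minimal sketch of relying on those defaults (the assertions are illustrative, using only getters shown in this diff):

    HFileContext defaults = new HFileContextBuilder().build();
    assert defaults.getChecksumType() == HFileContextBuilder.DEFAULT_CHECKSUM_TYPE;        // CRC32
    assert defaults.getBytesPerChecksum() == HFileContextBuilder.DEFAULT_BYTES_PER_CHECKSUM; // 16 * 1024
    assert defaults.getBlocksize() == HConstants.DEFAULT_BLOCKSIZE;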
@@ -239,18 +239,19 @@ public class HFileBlock implements Cacheable {
     onDiskSizeWithoutHeader = b.getInt();
     uncompressedSizeWithoutHeader = b.getInt();
     prevBlockOffset = b.getLong();
-    this.fileContext = new HFileContext();
-    this.fileContext.setUsesHBaseChecksum(usesHBaseChecksum);
+    HFileContextBuilder contextBuilder = new HFileContextBuilder();
+    contextBuilder.withHBaseCheckSum(usesHBaseChecksum);
     if (usesHBaseChecksum) {
-      this.fileContext.setChecksumType(ChecksumType.codeToType(b.get()));
-      this.fileContext.setBytesPerChecksum(b.getInt());
+      contextBuilder.withChecksumType(ChecksumType.codeToType(b.get()));
+      contextBuilder.withBytesPerCheckSum(b.getInt());
       this.onDiskDataSizeWithHeader = b.getInt();
     } else {
-      this.fileContext.setChecksumType(ChecksumType.NULL);
-      this.fileContext.setBytesPerChecksum(0);
+      contextBuilder.withChecksumType(ChecksumType.NULL);
+      contextBuilder.withBytesPerCheckSum(0);
       this.onDiskDataSizeWithHeader = onDiskSizeWithoutHeader +
           HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM;
     }
+    this.fileContext = contextBuilder.build();
     buf = b;
     buf.rewind();
   }

@@ -1019,9 +1020,18 @@ public class HFileBlock implements Cacheable {
    * 0 value in bytesPerChecksum.
    */
   public HFileBlock getBlockForCaching() {
-    HFileContext newContext = fileContext.clone();
-    newContext.setBytesPerChecksum(0);
-    newContext.setChecksumType(ChecksumType.NULL); // no checksums in cached data
+    HFileContext newContext = new HFileContextBuilder()
+        .withBlockSize(fileContext.getBlocksize())
+        .withBytesPerCheckSum(0)
+        .withChecksumType(ChecksumType.NULL) // no checksums in cached data
+        .withCompressionAlgo(fileContext.getCompression())
+        .withDataBlockEncodingInCache(fileContext.getEncodingInCache())
+        .withDataBlockEncodingOnDisk(fileContext.getEncodingOnDisk())
+        .withHBaseCheckSum(fileContext.shouldUseHBaseChecksum())
+        .withCompressTags(fileContext.shouldCompressTags())
+        .withIncludesMvcc(fileContext.shouldIncludeMvcc())
+        .withIncludesTags(fileContext.shouldIncludeTags())
+        .build();
     return new HFileBlock(blockType, getOnDiskSizeWithoutHeader(),
         getUncompressedSizeWithoutHeader(), prevOffset, getUncompressedBufferWithHeader(),
         DONT_FILL_HEADER, startOffset,
@@ -245,14 +245,14 @@ public class HFileDataBlockEncoderImpl implements HFileDataBlockEncoder {
    * See HBASE-8732
    * @return a new in cache encoding context
    */
-  private HFileBlockEncodingContext createInCacheEncodingContext(HFileContext meta) {
-    HFileContext newMeta = meta.clone();
+  private HFileBlockEncodingContext createInCacheEncodingContext(HFileContext fileContext) {
+    HFileContext newContext = new HFileContext(fileContext);
     return (inCache != DataBlockEncoding.NONE) ?
         this.inCache.getEncoder().newDataBlockEncodingContext(
-            this.inCache, dummyHeader, newMeta)
+            this.inCache, dummyHeader, newContext)
         :
         // create a default encoding context
-        new HFileBlockDefaultEncodingContext(this.inCache, dummyHeader, newMeta);
+        new HFileBlockDefaultEncodingContext(this.inCache, dummyHeader, newContext);
   }

   @Override
@@ -179,12 +179,12 @@ public class HFileReaderV2 extends AbstractHFileReader {
   }

   protected HFileContext createHFileContext(FixedFileTrailer trailer) {
-    HFileContext meta = new HFileContext();
-    meta.setIncludesMvcc(this.includesMemstoreTS);
-    meta.setUsesHBaseChecksum(
-        trailer.getMinorVersion() >= MINOR_VERSION_WITH_CHECKSUM);
-    meta.setCompressAlgo(this.compressAlgo);
-    return meta;
+    HFileContext hFileContext = new HFileContextBuilder()
+      .withIncludesMvcc(this.includesMemstoreTS)
+      .withCompressionAlgo(this.compressAlgo)
+      .withHBaseCheckSum(trailer.getMinorVersion() >= MINOR_VERSION_WITH_CHECKSUM)
+      .build();
+    return hFileContext;
   }

   /**
@@ -64,12 +64,13 @@ public class HFileReaderV3 extends HFileReaderV2 {

   @Override
   protected HFileContext createHFileContext(FixedFileTrailer trailer) {
-    HFileContext meta = new HFileContext();
-    meta.setIncludesMvcc(this.includesMemstoreTS);
-    meta.setUsesHBaseChecksum(true);
-    meta.setCompressAlgo(this.compressAlgo);
-    meta.setIncludesTags(true);
-    return meta;
+    HFileContext hfileContext = new HFileContextBuilder()
+      .withIncludesMvcc(this.includesMemstoreTS)
+      .withHBaseCheckSum(true)
+      .withCompressionAlgo(this.compressAlgo)
+      .withIncludesTags(true)
+      .build();
+    return hfileContext;
   }

   /**

@@ -273,4 +274,4 @@ public class HFileReaderV3 extends HFileReaderV2 {
   protected HFileBlock diskToCacheFormat(HFileBlock hfileBlock, final boolean isCompaction) {
     return dataBlockEncoder.diskToCacheFormat(hfileBlock, isCompaction);
   }
-}
\ No newline at end of file
+}
@@ -34,11 +34,8 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValue.KVComparator;
-import org.apache.hadoop.hbase.io.compress.Compression;
-import org.apache.hadoop.hbase.io.hfile.HFileContext;
 import org.apache.hadoop.hbase.io.hfile.HFile.Writer;
 import org.apache.hadoop.hbase.io.hfile.HFileBlock.BlockWritable;
-import org.apache.hadoop.hbase.util.ChecksumType;
 import org.apache.hadoop.hbase.util.BloomFilterWriter;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.io.Writable;

@@ -135,9 +132,6 @@ public class HFileWriterV2 extends AbstractHFileWriter {
   protected HFileBlock.Writer createBlockWriter() {
     // HFile filesystem-level (non-caching) block writer
-    hFileContext.setIncludesTags(false);
-    // This can be set while the write is created itself because
-    // in both cases useHBaseChecksum is going to be true
-    hFileContext.setUsesHBaseChecksum(true);
     return new HFileBlock.Writer(blockEncoder, hFileContext);
   }
   /**
@@ -186,7 +186,6 @@ public class HFileWriterV3 extends HFileWriterV2 {
   protected HFileBlock.Writer createBlockWriter() {
     // HFile filesystem-level (non-caching) block writer
     hFileContext.setIncludesTags(true);
-    hFileContext.setUsesHBaseChecksum(true);
     return new HFileBlock.Writer(blockEncoder, hFileContext);
   }

@@ -199,4 +198,4 @@ public class HFileWriterV3 extends HFileWriterV2 {
   protected int getMinorVersion() {
     return HFileReaderV3.MAX_MINOR_VERSION;
   }
-}
\ No newline at end of file
+}
@@ -49,9 +49,7 @@ import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.io.hfile.AbstractHFileWriter;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
-import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
-import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl;
-import org.apache.hadoop.hbase.io.hfile.NoOpDataBlockEncoder;
+import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
 import org.apache.hadoop.hbase.regionserver.BloomType;
 import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.StoreFile;

@@ -194,18 +192,20 @@ public class HFileOutputFormat extends FileOutputFormat<ImmutableBytesWritable,
           : Integer.parseInt(blockSizeString);
       Configuration tempConf = new Configuration(conf);
       tempConf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.0f);
-      HFileContext meta = new HFileContext();
-      meta.setCompressAlgo(AbstractHFileWriter.compressionByName(compression));
-      meta.setChecksumType(HStore.getChecksumType(conf));
-      meta.setBytesPerChecksum(HStore.getBytesPerChecksum(conf));
-      meta.setBlocksize(blockSize);
-      if (dataBlockEncodingStr != null) {
-        meta.setEncodingInCache(DataBlockEncoding.valueOf(dataBlockEncodingStr));
-        meta.setEncodingOnDisk(DataBlockEncoding.valueOf(dataBlockEncodingStr));
+      HFileContextBuilder contextBuilder = new HFileContextBuilder()
+          .withCompressionAlgo(AbstractHFileWriter.compressionByName(compression))
+          .withChecksumType(HStore.getChecksumType(conf))
+          .withBytesPerCheckSum(HStore.getBytesPerChecksum(conf))
+          .withBlockSize(blockSize);
+      if(dataBlockEncodingStr != null) {
+        contextBuilder.withDataBlockEncodingOnDisk(DataBlockEncoding.valueOf(dataBlockEncodingStr))
+            .withDataBlockEncodingInCache(DataBlockEncoding.valueOf(dataBlockEncodingStr));
       }
+      HFileContext hFileContext = contextBuilder.build();

       wl.writer = new StoreFile.WriterBuilder(conf, new CacheConfig(tempConf), fs)
           .withOutputDir(familydir).withBloomType(bloomType).withComparator(KeyValue.COMPARATOR)
-          .withFileContext(meta)
+          .withFileContext(hFileContext)
           .build();

       this.writers.put(family, wl);
@@ -71,6 +71,7 @@ import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
+import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.regionserver.BloomType;

@@ -653,19 +654,19 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
     int blocksize = familyDescriptor.getBlocksize();
     Algorithm compression = familyDescriptor.getCompression();
     BloomType bloomFilterType = familyDescriptor.getBloomFilterType();

-    HFileContext meta = new HFileContext();
-    meta.setCompressAlgo(compression);
-    meta.setChecksumType(HStore.getChecksumType(conf));
-    meta.setBytesPerChecksum(HStore.getBytesPerChecksum(conf));
-    meta.setBlocksize(blocksize);
-    meta.setEncodingInCache(familyDescriptor.getDataBlockEncoding());
-    meta.setEncodingOnDisk(familyDescriptor.getDataBlockEncodingOnDisk());
+    HFileContext hFileContext = new HFileContextBuilder()
+        .withCompressionAlgo(compression)
+        .withChecksumType(HStore.getChecksumType(conf))
+        .withBytesPerCheckSum(HStore.getBytesPerChecksum(conf))
+        .withBlockSize(blocksize)
+        .withDataBlockEncodingInCache(familyDescriptor.getDataBlockEncoding())
+        .withDataBlockEncodingOnDisk(familyDescriptor.getDataBlockEncodingOnDisk())
+        .build();
     halfWriter = new StoreFile.WriterBuilder(conf, cacheConf,
         fs)
         .withFilePath(outFile)
         .withBloomType(bloomFilterType)
-        .withFileContext(meta)
+        .withFileContext(hFileContext)
         .build();
     HFileScanner scanner = halfReader.getScanner(false, false, false);
     scanner.seekTo();
@@ -59,6 +59,7 @@ import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
+import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
 import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
 import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;

@@ -813,18 +814,20 @@ public class HStore implements Store {

   private HFileContext createFileContext(Compression.Algorithm compression,
       boolean includeMVCCReadpoint, boolean includesTag) {
-    HFileContext hFileContext = new HFileContext();
-    hFileContext.setIncludesMvcc(includeMVCCReadpoint);
-    hFileContext.setIncludesTags(includesTag);
     if (compression == null) {
       compression = HFile.DEFAULT_COMPRESSION_ALGORITHM;
     }
-    hFileContext.setCompressAlgo(compression);
-    hFileContext.setChecksumType(checksumType);
-    hFileContext.setBytesPerChecksum(bytesPerChecksum);
-    hFileContext.setBlocksize(blocksize);
-    hFileContext.setEncodingInCache(family.getDataBlockEncoding());
-    hFileContext.setEncodingOnDisk(family.getDataBlockEncodingOnDisk());
+    HFileContext hFileContext = new HFileContextBuilder()
+        .withIncludesMvcc(includeMVCCReadpoint)
+        .withIncludesTags(includesTag)
+        .withCompressionAlgo(compression)
+        .withChecksumType(checksumType)
+        .withBytesPerCheckSum(bytesPerChecksum)
+        .withBlockSize(blocksize)
+        .withHBaseCheckSum(true)
+        .withDataBlockEncodingOnDisk(family.getDataBlockEncodingOnDisk())
+        .withDataBlockEncodingInCache(family.getDataBlockEncoding())
+        .build();
     return hFileContext;
   }
@@ -18,9 +18,7 @@
  */
 package org.apache.hadoop.hbase.util;

-import java.awt.*;
 import java.io.IOException;
 import java.util.Arrays;

 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;

@@ -32,10 +30,11 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.io.compress.Compression;
-import org.apache.hadoop.hbase.io.hfile.AbstractHFileWriter;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFile;
+import org.apache.hadoop.hbase.io.hfile.AbstractHFileWriter;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
+import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
 import org.apache.hadoop.io.compress.Compressor;

 /**

@@ -114,8 +113,8 @@ public class CompressionTest {
   public static void doSmokeTest(FileSystem fs, Path path, String codec)
   throws Exception {
     Configuration conf = HBaseConfiguration.create();
-    HFileContext context = new HFileContext();
-    context.setCompressAlgo(AbstractHFileWriter.compressionByName(codec));
+    HFileContext context = new HFileContextBuilder()
+        .withCompressionAlgo(AbstractHFileWriter.compressionByName(codec)).build();
     HFile.Writer writer = HFile.getWriterFactoryNoCache(conf)
         .withPath(fs, path)
         .withFileContext(context)
@@ -30,10 +30,10 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
+import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
 import org.apache.hadoop.hbase.util.Bytes;

@@ -189,8 +189,7 @@ public class HFilePerformanceEvaluation {

     @Override
     void setUp() throws Exception {
-      HFileContext hFileContext = new HFileContext();
-      hFileContext.setBlocksize(RFILE_BLOCKSIZE);
+      HFileContext hFileContext = new HFileContextBuilder().withBlockSize(RFILE_BLOCKSIZE).build();
       writer =
         HFile.getWriterFactoryNoCache(conf)
             .withPath(fs, mf)
@@ -37,8 +37,6 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.Coprocessor;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HRegionInfo;

@@ -46,7 +44,10 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.MediumTests;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;

@@ -56,17 +57,17 @@ import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.RowMutations;
 import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
+import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
 import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
-import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.regionserver.ScanType;
+import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

@@ -611,7 +612,7 @@ public class TestRegionObserverInterface {
       Configuration conf,
       FileSystem fs, Path path,
       byte[] family, byte[] qualifier) throws IOException {
-    HFileContext context = new HFileContext();
+    HFileContext context = new HFileContextBuilder().build();
     HFile.Writer writer = HFile.getWriterFactory(conf, new CacheConfig(conf))
         .withPath(fs, path)
         .withFileContext(context)
@@ -37,13 +37,13 @@ import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
+import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.mortbay.log.Log;

 @Category(SmallTests.class)
 public class TestHalfStoreFileReader {

@@ -83,8 +83,7 @@ public class TestHalfStoreFileReader {
     Configuration conf = TEST_UTIL.getConfiguration();
     FileSystem fs = FileSystem.get(conf);
     CacheConfig cacheConf = new CacheConfig(conf);
-    HFileContext meta = new HFileContext();
-    meta.setBlocksize(1024);
+    HFileContext meta = new HFileContextBuilder().withBlockSize(1024).build();
     HFile.Writer w = HFile.getWriterFactory(conf, cacheConf)
         .withPath(fs, p)
         .withFileContext(meta)

@@ -149,8 +148,7 @@ public class TestHalfStoreFileReader {
     Configuration conf = TEST_UTIL.getConfiguration();
     FileSystem fs = FileSystem.get(conf);
     CacheConfig cacheConf = new CacheConfig(conf);
-    HFileContext meta = new HFileContext();
-    meta.setBlocksize(1024);
+    HFileContext meta = new HFileContextBuilder().withBlockSize(1024).build();
     HFile.Writer w = HFile.getWriterFactory(conf, cacheConf)
         .withPath(fs, p)
         .withFileContext(meta)
@@ -31,11 +31,12 @@ import java.util.Random;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.Tag;
 import org.apache.hadoop.hbase.KeyValue.Type;
 import org.apache.hadoop.hbase.LargeTests;
+import org.apache.hadoop.hbase.Tag;
 import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
+import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.test.RedundantKVGenerator;
 import org.junit.Test;

@@ -76,11 +77,11 @@ public class TestDataBlockEncoders {
   private HFileBlockEncodingContext getEncodingContext(Compression.Algorithm algo,
       DataBlockEncoding encoding) {
     DataBlockEncoder encoder = encoding.getEncoder();
-    HFileContext meta = new HFileContext();
-    meta.setUsesHBaseChecksum(false);
-    meta.setIncludesMvcc(includesMemstoreTS);
-    meta.setIncludesTags(includesTags);
-    meta.setCompressAlgo(algo);
+    HFileContext meta = new HFileContextBuilder()
+        .withHBaseCheckSum(false)
+        .withIncludesMvcc(includesMemstoreTS)
+        .withIncludesTags(includesTags)
+        .withCompressionAlgo(algo).build();
     if (encoder != null) {
       return encoder.newDataBlockEncodingContext(encoding,
           HConstants.HFILEBLOCK_DUMMY_HEADER, meta);

@@ -114,11 +115,11 @@ public class TestDataBlockEncoders {
     DataInputStream dis = new DataInputStream(bais);
     ByteBuffer actualDataset;
     DataBlockEncoder encoder = encoding.getEncoder();
-    HFileContext meta = new HFileContext();
-    meta.setUsesHBaseChecksum(false);
-    meta.setIncludesMvcc(includesMemstoreTS);
-    meta.setIncludesTags(includesTags);
-    meta.setCompressAlgo(Compression.Algorithm.NONE);
+    HFileContext meta = new HFileContextBuilder()
+        .withHBaseCheckSum(false)
+        .withIncludesMvcc(includesMemstoreTS)
+        .withIncludesTags(includesTags)
+        .withCompressionAlgo(Compression.Algorithm.NONE).build();
     actualDataset = encoder.decodeKeyValues(dis, encoder.newDataBlockDecodingContext(meta));
     dataset.rewind();
     actualDataset.rewind();

@@ -219,11 +220,12 @@ public class TestDataBlockEncoders {

     ByteBuffer encodedBuffer = ByteBuffer.wrap(encodeBytes(encoding, originalBuffer));
     DataBlockEncoder encoder = encoding.getEncoder();
-    HFileContext meta = new HFileContext();
-    meta.setUsesHBaseChecksum(false);
-    meta.setIncludesMvcc(includesMemstoreTS);
-    meta.setIncludesTags(includesTags);
-    meta.setCompressAlgo(Compression.Algorithm.NONE);
+    HFileContext meta = new HFileContextBuilder()
+        .withHBaseCheckSum(false)
+        .withIncludesMvcc(includesMemstoreTS)
+        .withIncludesTags(includesTags)
+        .withCompressionAlgo(Compression.Algorithm.NONE)
+        .build();
     DataBlockEncoder.EncodedSeeker seeker = encoder.createSeeker(KeyValue.COMPARATOR,
         encoder.newDataBlockDecodingContext(meta));
     seeker.setCurrentBuffer(encodedBuffer);

@@ -274,11 +276,12 @@ public class TestDataBlockEncoders {
       throw new RuntimeException(String.format("Bug while encoding using '%s'",
           encoder.toString()), e);
     }
-    HFileContext meta = new HFileContext();
-    meta.setUsesHBaseChecksum(false);
-    meta.setIncludesMvcc(includesMemstoreTS);
-    meta.setIncludesTags(includesTags);
-    meta.setCompressAlgo(Compression.Algorithm.NONE);
+    HFileContext meta = new HFileContextBuilder()
+        .withHBaseCheckSum(false)
+        .withIncludesMvcc(includesMemstoreTS)
+        .withIncludesTags(includesTags)
+        .withCompressionAlgo(Compression.Algorithm.NONE)
+        .build();
     DataBlockEncoder.EncodedSeeker seeker = encoder.createSeeker(KeyValue.COMPARATOR,
         encoder.newDataBlockDecodingContext(meta));
     seeker.setCurrentBuffer(encodedBuffer);
@@ -42,6 +42,7 @@ import org.apache.hadoop.hbase.codec.prefixtree.PrefixTreeCodec;
 import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoder.EncodedSeeker;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
+import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CollectionBackedScanner;
 import org.junit.Assert;

@@ -96,11 +97,11 @@ public class TestPrefixTreeEncoding {
     PrefixTreeCodec encoder = new PrefixTreeCodec();
     int batchId = numBatchesWritten++;
     ByteBuffer dataBuffer = generateFixedTestData(kvset, batchId, false, includesTag);
-    HFileContext meta = new HFileContext();
-    meta.setUsesHBaseChecksum(false);
-    meta.setIncludesMvcc(false);
-    meta.setIncludesTags(includesTag);
-    meta.setCompressAlgo(Algorithm.NONE);
+    HFileContext meta = new HFileContextBuilder()
+        .withHBaseCheckSum(false)
+        .withIncludesMvcc(false)
+        .withIncludesTags(includesTag)
+        .withCompressionAlgo(Algorithm.NONE).build();
     HFileBlockEncodingContext blkEncodingCtx = new HFileBlockDefaultEncodingContext(
         DataBlockEncoding.PREFIX_TREE, new byte[0], meta);
     encoder.encodeKeyValues(dataBuffer, blkEncodingCtx);

@@ -137,11 +138,12 @@ public class TestPrefixTreeEncoding {
   public void testScanWithRandomData() throws Exception {
     PrefixTreeCodec encoder = new PrefixTreeCodec();
     ByteBuffer dataBuffer = generateRandomTestData(kvset, numBatchesWritten++, includesTag);
-    HFileContext meta = new HFileContext();
-    meta.setUsesHBaseChecksum(false);
-    meta.setIncludesMvcc(false);
-    meta.setIncludesTags(includesTag);
-    meta.setCompressAlgo(Algorithm.NONE);
+    HFileContext meta = new HFileContextBuilder()
+        .withHBaseCheckSum(false)
+        .withIncludesMvcc(false)
+        .withIncludesTags(includesTag)
+        .withCompressionAlgo(Algorithm.NONE)
+        .build();
     HFileBlockEncodingContext blkEncodingCtx = new HFileBlockDefaultEncodingContext(
         DataBlockEncoding.PREFIX_TREE, new byte[0], meta);
     encoder.encodeKeyValues(dataBuffer, blkEncodingCtx);

@@ -173,11 +175,12 @@ public class TestPrefixTreeEncoding {
     PrefixTreeCodec encoder = new PrefixTreeCodec();
     int batchId = numBatchesWritten++;
     ByteBuffer dataBuffer = generateRandomTestData(kvset, batchId, includesTag);
-    HFileContext meta = new HFileContext();
-    meta.setUsesHBaseChecksum(false);
-    meta.setIncludesMvcc(false);
-    meta.setIncludesTags(includesTag);
-    meta.setCompressAlgo(Algorithm.NONE);
+    HFileContext meta = new HFileContextBuilder()
+        .withHBaseCheckSum(false)
+        .withIncludesMvcc(false)
+        .withIncludesTags(includesTag)
+        .withCompressionAlgo(Algorithm.NONE)
+        .build();
     HFileBlockEncodingContext blkEncodingCtx = new HFileBlockDefaultEncodingContext(
         DataBlockEncoding.PREFIX_TREE, new byte[0], meta);
     encoder.encodeKeyValues(dataBuffer, blkEncodingCtx);

@@ -194,11 +197,12 @@ public class TestPrefixTreeEncoding {
     PrefixTreeCodec encoder = new PrefixTreeCodec();
     int batchId = numBatchesWritten++;
     ByteBuffer dataBuffer = generateFixedTestData(kvset, batchId, includesTag);
-    HFileContext meta = new HFileContext();
-    meta.setUsesHBaseChecksum(false);
-    meta.setIncludesMvcc(false);
-    meta.setIncludesTags(includesTag);
-    meta.setCompressAlgo(Algorithm.NONE);
+    HFileContext meta = new HFileContextBuilder()
+        .withHBaseCheckSum(false)
+        .withIncludesMvcc(false)
+        .withIncludesTags(includesTag)
+        .withCompressionAlgo(Algorithm.NONE)
+        .build();
     HFileBlockEncodingContext blkEncodingCtx = new HFileBlockDefaultEncodingContext(
         DataBlockEncoding.PREFIX_TREE, new byte[0], meta);
     encoder.encodeKeyValues(dataBuffer, blkEncodingCtx);
@@ -38,7 +38,6 @@ import org.apache.hadoop.hbase.MultithreadedTestUtil;
 import org.apache.hadoop.hbase.MultithreadedTestUtil.TestThread;
 import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.io.compress.Compression;
-import org.apache.hadoop.hbase.io.hfile.HFileContext;
 import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache;
 import org.apache.hadoop.hbase.util.ChecksumType;

@@ -341,13 +340,14 @@ public class CacheTestUtils {
       cachedBuffer.putInt(uncompressedSizeWithoutHeader);
       cachedBuffer.putLong(prevBlockOffset);
       cachedBuffer.rewind();
-      HFileContext meta = new HFileContext();
-      meta.setUsesHBaseChecksum(false);
-      meta.setIncludesMvcc(includesMemstoreTS);
-      meta.setIncludesTags(false);
-      meta.setCompressAlgo(Compression.Algorithm.NONE);
-      meta.setBytesPerChecksum(0);
-      meta.setChecksumType(ChecksumType.NULL);
+      HFileContext meta = new HFileContextBuilder()
+          .withHBaseCheckSum(false)
+          .withIncludesMvcc(includesMemstoreTS)
+          .withIncludesTags(false)
+          .withCompressionAlgo(Compression.Algorithm.NONE)
+          .withBytesPerCheckSum(0)
+          .withChecksumType(ChecksumType.NULL)
+          .build();
       HFileBlock generated = new HFileBlock(BlockType.DATA,
           onDiskSizeWithoutHeader, uncompressedSizeWithoutHeader,
           prevBlockOffset, cachedBuffer, HFileBlock.DONT_FILL_HEADER,
@@ -307,13 +307,10 @@ public class TestCacheOnWrite {
     }
     Path storeFileParentDir = new Path(TEST_UTIL.getDataTestDir(),
         "test_cache_on_write");
-    HFileContext meta = new HFileContext();
-    meta.setCompressAlgo(compress);
-    meta.setChecksumType(CKTYPE);
-    meta.setBytesPerChecksum(CKBYTES);
-    meta.setBlocksize(DATA_BLOCK_SIZE);
-    meta.setEncodingInCache(encoder.getEncodingInCache());
-    meta.setEncodingOnDisk(encoder.getEncodingOnDisk());
+    HFileContext meta = new HFileContextBuilder().withCompressionAlgo(compress)
+        .withBytesPerCheckSum(CKBYTES).withChecksumType(ChecksumType.NULL)
+        .withBlockSize(DATA_BLOCK_SIZE).withDataBlockEncodingInCache(encoder.getEncodingInCache())
+        .withDataBlockEncodingOnDisk(encoder.getEncodingOnDisk()).build();
     StoreFile.Writer sfw = new StoreFile.WriterBuilder(conf, cacheConf, fs)
         .withOutputDir(storeFileParentDir).withComparator(KeyValue.COMPARATOR)
        .withFileContext(meta)
@@ -19,11 +19,13 @@
  */
 package org.apache.hadoop.hbase.io.hfile;

-import static org.junit.Assert.*;
+import static org.apache.hadoop.hbase.io.compress.Compression.Algorithm.GZ;
+import static org.apache.hadoop.hbase.io.compress.Compression.Algorithm.NONE;
+import static org.junit.Assert.assertEquals;

 import java.io.ByteArrayInputStream;
-import java.io.DataOutputStream;
 import java.io.DataInputStream;
+import java.io.DataOutputStream;
 import java.io.IOException;
 import java.nio.ByteBuffer;

@@ -37,14 +39,9 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.SmallTests;
 import org.apache.hadoop.hbase.fs.HFileSystem;
-import org.apache.hadoop.hbase.io.compress.Compression;
-import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
-import org.apache.hadoop.hbase.io.hfile.HFileContext;
+import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
+import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.util.ChecksumType;
-
-import static org.apache.hadoop.hbase.io.compress.Compression.Algorithm.*;
-
 import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;

@@ -92,12 +89,13 @@ public class TestChecksum {
       Path path = new Path(TEST_UTIL.getDataTestDir(), "blocks_v2_"
           + algo);
       FSDataOutputStream os = fs.create(path);
-      HFileContext meta = new HFileContext();
-      meta.setCompressAlgo(algo);
-      meta.setIncludesMvcc(true);
-      meta.setIncludesTags(useTags);
-      meta.setChecksumType(HFile.DEFAULT_CHECKSUM_TYPE);
-      meta.setBytesPerChecksum(HFile.DEFAULT_BYTES_PER_CHECKSUM);
+      HFileContext meta = new HFileContextBuilder()
+          .withCompressionAlgo(algo)
+          .withIncludesMvcc(true)
+          .withIncludesTags(useTags)
+          .withChecksumType(HFile.DEFAULT_CHECKSUM_TYPE)
+          .withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM)
+          .build();
       HFileBlock.Writer hbw = new HFileBlock.Writer(null, meta);
       long totalSize = 0;
       for (int blockId = 0; blockId < 2; ++blockId) {

@@ -114,11 +112,12 @@ public class TestChecksum {

       // Do a read that purposely introduces checksum verification failures.
       FSDataInputStreamWrapper is = new FSDataInputStreamWrapper(fs, path);
-      meta = new HFileContext();
-      meta.setCompressAlgo(algo);
-      meta.setIncludesMvcc(true);
-      meta.setIncludesTags(useTags);
-      meta.setUsesHBaseChecksum(true);
+      meta = new HFileContextBuilder()
+          .withCompressionAlgo(algo)
+          .withIncludesMvcc(true)
+          .withIncludesTags(useTags)
+          .withHBaseCheckSum(true)
+          .build();
       HFileBlock.FSReader hbr = new FSReaderV2Test(is, totalSize, fs, path, meta);
       HFileBlock b = hbr.readBlockData(0, -1, -1, pread);
       b.sanityCheck();

@@ -197,13 +196,14 @@ public class TestChecksum {
       Path path = new Path(TEST_UTIL.getDataTestDir(), "checksumChunk_" +
           algo + bytesPerChecksum);
       FSDataOutputStream os = fs.create(path);
-      HFileContext meta = new HFileContext();
-      meta.setCompressAlgo(algo);
-      meta.setIncludesMvcc(true);
-      meta.setIncludesTags(useTags);
-      meta.setUsesHBaseChecksum(true);
-      meta.setBytesPerChecksum(bytesPerChecksum);
-      meta.setChecksumType(HFile.DEFAULT_CHECKSUM_TYPE);
+      HFileContext meta = new HFileContextBuilder()
+          .withCompressionAlgo(algo)
+          .withIncludesMvcc(true)
+          .withIncludesTags(useTags)
+          .withHBaseCheckSum(true)
+          .withBytesPerCheckSum(bytesPerChecksum)
+          .withChecksumType(HFile.DEFAULT_CHECKSUM_TYPE)
+          .build();
       HFileBlock.Writer hbw = new HFileBlock.Writer(null,
           meta);

@@ -236,12 +236,13 @@ public class TestChecksum {
       // Read data back from file.
       FSDataInputStream is = fs.open(path);
       FSDataInputStream nochecksum = hfs.getNoChecksumFs().open(path);
-      meta = new HFileContext();
-      meta.setCompressAlgo(algo);
-      meta.setIncludesMvcc(true);
-      meta.setIncludesTags(useTags);
-      meta.setUsesHBaseChecksum(true);
-      meta.setBytesPerChecksum(bytesPerChecksum);
+      meta = new HFileContextBuilder()
+          .withCompressionAlgo(algo)
+          .withIncludesMvcc(true)
+          .withIncludesTags(useTags)
+          .withHBaseCheckSum(true)
+          .withBytesPerCheckSum(bytesPerChecksum)
+          .build();
       HFileBlock.FSReader hbr = new HFileBlock.FSReaderV2(new FSDataInputStreamWrapper(
           is, nochecksum), totalSize, hfs, path, meta);
       HFileBlock b = hbr.readBlockData(0, -1, -1, pread);
@@ -83,8 +83,7 @@ public class TestHFile extends HBaseTestCase {
   public void testEmptyHFile() throws IOException {
     if (cacheConf == null) cacheConf = new CacheConfig(conf);
     Path f = new Path(ROOT_DIR, getName());
-    HFileContext context = new HFileContext();
-    context.setIncludesTags(false);
+    HFileContext context = new HFileContextBuilder().withIncludesTags(false).build();
     Writer w =
         HFile.getWriterFactory(conf, cacheConf).withPath(fs, f).withFileContext(context).create();
     w.close();

@@ -133,7 +132,7 @@ public class TestHFile extends HBaseTestCase {
   public void testCorruptTruncatedHFile() throws IOException {
     if (cacheConf == null) cacheConf = new CacheConfig(conf);
     Path f = new Path(ROOT_DIR, getName());
-    HFileContext context = new HFileContext();
+    HFileContext context = new HFileContextBuilder().build();
     Writer w = HFile.getWriterFactory(conf, cacheConf).withPath(this.fs, f)
         .withFileContext(context).create();
     writeSomeRecords(w, 0, 100, false);

@@ -224,9 +223,10 @@ public class TestHFile extends HBaseTestCase {
     if (cacheConf == null) cacheConf = new CacheConfig(conf);
     Path ncTFile = new Path(ROOT_DIR, "basic.hfile." + codec.toString() + useTags);
     FSDataOutputStream fout = createFSOutput(ncTFile);
-    HFileContext meta = new HFileContext();
-    meta.setBlocksize(minBlockSize);
-    meta.setCompressAlgo(AbstractHFileWriter.compressionByName(codec));
+    HFileContext meta = new HFileContextBuilder()
+        .withBlockSize(minBlockSize)
+        .withCompressionAlgo(AbstractHFileWriter.compressionByName(codec))
+        .build();
     Writer writer = HFile.getWriterFactory(conf, cacheConf)
         .withOutputStream(fout)
         .withFileContext(meta)

@@ -313,9 +313,9 @@ public class TestHFile extends HBaseTestCase {
     if (cacheConf == null) cacheConf = new CacheConfig(conf);
     Path mFile = new Path(ROOT_DIR, "meta.hfile");
     FSDataOutputStream fout = createFSOutput(mFile);
-    HFileContext meta = new HFileContext();
-    meta.setCompressAlgo(AbstractHFileWriter.compressionByName(compress));
-    meta.setBlocksize(minBlockSize);
+    HFileContext meta = new HFileContextBuilder()
+        .withCompressionAlgo(AbstractHFileWriter.compressionByName(compress))
+        .withBlockSize(minBlockSize).build();
     Writer writer = HFile.getWriterFactory(conf, cacheConf)
         .withOutputStream(fout)
         .withFileContext(meta)

@@ -347,9 +347,8 @@ public class TestHFile extends HBaseTestCase {
         HBaseTestingUtility.COMPRESSION_ALGORITHMS) {
       Path mFile = new Path(ROOT_DIR, "nometa_" + compressAlgo + ".hfile");
       FSDataOutputStream fout = createFSOutput(mFile);
-      HFileContext meta = new HFileContext();
-      meta.setCompressAlgo((compressAlgo));
-      meta.setBlocksize(minBlockSize);
+      HFileContext meta = new HFileContextBuilder().withCompressionAlgo(compressAlgo)
+          .withBlockSize(minBlockSize).build();
       Writer writer = HFile.getWriterFactory(conf, cacheConf)
           .withOutputStream(fout)
           .withFileContext(meta)
@@ -218,12 +218,13 @@ public class TestHFileBlock {
   static HFileBlock.Writer createTestV2Block(Compression.Algorithm algo,
       boolean includesMemstoreTS, boolean includesTag) throws IOException {
     final BlockType blockType = BlockType.DATA;
-    HFileContext meta = new HFileContext();
-    meta.setCompressAlgo(algo);
-    meta.setIncludesMvcc(includesMemstoreTS);
-    meta.setIncludesTags(includesTag);
-    meta.setBytesPerChecksum(HFile.DEFAULT_BYTES_PER_CHECKSUM);
-    meta.setChecksumType(HFile.DEFAULT_CHECKSUM_TYPE);
+    HFileContext meta = new HFileContextBuilder()
+        .withCompressionAlgo(algo)
+        .withIncludesMvcc(includesMemstoreTS)
+        .withIncludesTags(includesTag)
+        .withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM)
+        .withChecksumType(HFile.DEFAULT_CHECKSUM_TYPE)
+        .build();
     HFileBlock.Writer hbw = new HFileBlock.Writer(null, meta);
     DataOutputStream dos = hbw.startWriting(blockType);
     writeTestBlockContents(dos);

@@ -301,12 +302,13 @@ public class TestHFileBlock {
       Path path = new Path(TEST_UTIL.getDataTestDir(), "blocks_v2_"
          + algo);
       FSDataOutputStream os = fs.create(path);
-      HFileContext meta = new HFileContext();
-      meta.setCompressAlgo(algo);
-      meta.setIncludesMvcc(includesMemstoreTS);
-      meta.setIncludesTags(includesTag);
-      meta.setBytesPerChecksum(HFile.DEFAULT_BYTES_PER_CHECKSUM);
-      meta.setChecksumType(HFile.DEFAULT_CHECKSUM_TYPE);
+      HFileContext meta = new HFileContextBuilder()
+          .withCompressionAlgo(algo)
+          .withIncludesMvcc(includesMemstoreTS)
+          .withIncludesTags(includesTag)
+          .withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM)
+          .withChecksumType(HFile.DEFAULT_CHECKSUM_TYPE)
+          .build();
       HFileBlock.Writer hbw = new HFileBlock.Writer(null,
          meta);
       long totalSize = 0;

@@ -320,11 +322,11 @@ public class TestHFileBlock {
       os.close();

       FSDataInputStream is = fs.open(path);
-      meta = new HFileContext();
-      meta.setUsesHBaseChecksum(true);
-      meta.setIncludesMvcc(includesMemstoreTS);
-      meta.setIncludesTags(includesTag);
-      meta.setCompressAlgo(algo);
+      meta = new HFileContextBuilder()
+          .withHBaseCheckSum(true)
+          .withIncludesMvcc(includesMemstoreTS)
+          .withIncludesTags(includesTag)
+          .withCompressionAlgo(algo).build();
       HFileBlock.FSReader hbr = new HFileBlock.FSReaderV2(is, totalSize, meta);
       HFileBlock b = hbr.readBlockData(0, -1, -1, pread);
       is.close();

@@ -383,12 +385,13 @@ public class TestHFileBlock {
         FSDataOutputStream os = fs.create(path);
         HFileDataBlockEncoder dataBlockEncoder =
             new HFileDataBlockEncoderImpl(encoding);
-        HFileContext meta = new HFileContext();
-        meta.setCompressAlgo(algo);
-        meta.setIncludesMvcc(includesMemstoreTS);
-        meta.setIncludesTags(includesTag);
-        meta.setBytesPerChecksum(HFile.DEFAULT_BYTES_PER_CHECKSUM);
-        meta.setChecksumType(HFile.DEFAULT_CHECKSUM_TYPE);
+        HFileContext meta = new HFileContextBuilder()
+            .withCompressionAlgo(algo)
+            .withIncludesMvcc(includesMemstoreTS)
+            .withIncludesTags(includesTag)
+            .withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM)
+            .withChecksumType(HFile.DEFAULT_CHECKSUM_TYPE)
+            .build();
         HFileBlock.Writer hbw = new HFileBlock.Writer(dataBlockEncoder,
             meta);
         long totalSize = 0;

@@ -404,11 +407,12 @@ public class TestHFileBlock {
         os.close();

         FSDataInputStream is = fs.open(path);
-        meta = new HFileContext();
-        meta.setUsesHBaseChecksum(true);
-        meta.setIncludesMvcc(includesMemstoreTS);
-        meta.setIncludesTags(includesTag);
-        meta.setCompressAlgo(algo);
+        meta = new HFileContextBuilder()
+            .withHBaseCheckSum(true)
+            .withCompressionAlgo(algo)
+            .withIncludesMvcc(includesMemstoreTS)
+            .withIncludesTags(includesTag)
+            .build();
         HFileBlock.FSReaderV2 hbr = new HFileBlock.FSReaderV2(is, totalSize, meta);
         hbr.setDataBlockEncoder(dataBlockEncoder);
         hbr.setIncludesMemstoreTS(includesMemstoreTS);

@@ -457,10 +461,11 @@ public class TestHFileBlock {
     DataBlockEncoder encoder = encoding.getEncoder();
     int headerLen = dummyHeader.length;
     byte[] encodedResultWithHeader = null;
-    HFileContext meta = new HFileContext();
-    meta.setCompressAlgo(algo);
-    meta.setIncludesMvcc(includesMemstoreTS);
-    meta.setIncludesTags(useTag);
+    HFileContext meta = new HFileContextBuilder()
+        .withCompressionAlgo(algo)
+        .withIncludesMvcc(includesMemstoreTS)
+        .withIncludesTags(useTag)
+        .build();
     if (encoder != null) {
       HFileBlockEncodingContext encodingCtx = encoder.newDataBlockEncodingContext(encoding,
           dummyHeader, meta);

@@ -550,11 +555,11 @@ public class TestHFileBlock {
           expectedPrevOffsets, expectedTypes, expectedContents);

       FSDataInputStream is = fs.open(path);
-      HFileContext meta = new HFileContext();
-      meta.setUsesHBaseChecksum(true);
-      meta.setIncludesMvcc(includesMemstoreTS);
-      meta.setIncludesTags(includesTag);
-      meta.setCompressAlgo(algo);
+      HFileContext meta = new HFileContextBuilder()
+          .withHBaseCheckSum(true)
+          .withIncludesMvcc(includesMemstoreTS)
+          .withIncludesTags(includesTag)
+          .withCompressionAlgo(algo).build();
       HFileBlock.FSReader hbr = new HFileBlock.FSReaderV2(is, totalSize, meta);
       long curOffset = 0;
       for (int i = 0; i < NUM_TEST_BLOCKS; ++i) {

@@ -733,11 +738,12 @@ public class TestHFileBlock {
     writeBlocks(rand, compressAlgo, path, offsets, null, types, null);
     FSDataInputStream is = fs.open(path);
     long fileSize = fs.getFileStatus(path).getLen();
-    HFileContext meta = new HFileContext();
-    meta.setUsesHBaseChecksum(true);
-    meta.setIncludesMvcc(includesMemstoreTS);
-    meta.setIncludesTags(includesTag);
-    meta.setCompressAlgo(compressAlgo);
+    HFileContext meta = new HFileContextBuilder()
+        .withHBaseCheckSum(true)
+        .withIncludesMvcc(includesMemstoreTS)
+        .withIncludesTags(includesTag)
+        .withCompressionAlgo(compressAlgo)
+        .build();
     HFileBlock.FSReader hbr = new HFileBlock.FSReaderV2(is, fileSize, meta);

     Executor exec = Executors.newFixedThreadPool(NUM_READER_THREADS);

@@ -769,13 +775,14 @@ public class TestHFileBlock {
       ) throws IOException {
     boolean cacheOnWrite = expectedContents != null;
     FSDataOutputStream os = fs.create(path);
-    HFileContext meta = new HFileContext();
-    meta.setUsesHBaseChecksum(true);
-    meta.setIncludesMvcc(includesMemstoreTS);
-    meta.setIncludesTags(includesTag);
-    meta.setCompressAlgo(compressAlgo);
-    meta.setBytesPerChecksum(HFile.DEFAULT_BYTES_PER_CHECKSUM);
-    meta.setChecksumType(HFile.DEFAULT_CHECKSUM_TYPE);
+    HFileContext meta = new HFileContextBuilder()
+        .withHBaseCheckSum(true)
+        .withIncludesMvcc(includesMemstoreTS)
+        .withIncludesTags(includesTag)
+        .withCompressionAlgo(compressAlgo)
+        .withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM)
+        .withChecksumType(HFile.DEFAULT_CHECKSUM_TYPE)
+        .build();
     HFileBlock.Writer hbw = new HFileBlock.Writer(null, meta);
     Map<BlockType, Long> prevOffsetByType = new HashMap<BlockType, Long>();
     long totalSize = 0;

@@ -839,14 +846,13 @@ public class TestHFileBlock {
     for (int size : new int[] { 100, 256, 12345 }) {
       byte[] byteArr = new byte[HConstants.HFILEBLOCK_HEADER_SIZE + size];
       ByteBuffer buf = ByteBuffer.wrap(byteArr, 0, size);
-      HFileContext meta = new HFileContext();
-      meta.setIncludesMvcc(includesMemstoreTS);
-      meta.setIncludesTags(includesTag);
-      meta.setUsesHBaseChecksum(false);
-      meta.setCompressAlgo(Algorithm.NONE);
-      meta.setBytesPerChecksum(HFile.DEFAULT_BYTES_PER_CHECKSUM);
-      meta.setChecksumType(ChecksumType.NULL);
-      meta.setBytesPerChecksum(0);
+      HFileContext meta = new HFileContextBuilder()
+          .withIncludesMvcc(includesMemstoreTS)
+          .withIncludesTags(includesTag)
+          .withHBaseCheckSum(false)
+          .withCompressionAlgo(Algorithm.NONE)
+          .withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM)
+          .withChecksumType(ChecksumType.NULL).build();
       HFileBlock block = new HFileBlock(BlockType.DATA, size, size, -1, buf,
           HFileBlock.FILL_HEADER, -1,
           0, meta);
@@ -42,13 +42,12 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.SmallTests;
 import org.apache.hadoop.hbase.fs.HFileSystem;
 import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
 import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext;
 import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
 import org.apache.hadoop.hbase.io.hfile.HFileBlock.BlockWritable;
-import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ChecksumType;
 import org.apache.hadoop.io.compress.Compressor;
@@ -198,11 +197,12 @@ public class TestHFileBlockCompatibility {
     os.close();
 
     FSDataInputStream is = fs.open(path);
-    HFileContext meta = new HFileContext();
-    meta.setUsesHBaseChecksum(false);
-    meta.setIncludesMvcc(includesMemstoreTS);
-    meta.setIncludesTags(includesTag);
-    meta.setCompressAlgo(algo);
+    HFileContext meta = new HFileContextBuilder()
+                        .withHBaseCheckSum(false)
+                        .withIncludesMvcc(includesMemstoreTS)
+                        .withIncludesTags(includesTag)
+                        .withCompressionAlgo(algo)
+                        .build();
     HFileBlock.FSReader hbr = new HFileBlock.FSReaderV2(new FSDataInputStreamWrapper(is),
         totalSize, fs, path, meta);
     HFileBlock b = hbr.readBlockData(0, -1, -1, pread);
@@ -280,11 +280,12 @@ public class TestHFileBlockCompatibility {
     os.close();
 
     FSDataInputStream is = fs.open(path);
-    HFileContext meta = new HFileContext();
-    meta.setUsesHBaseChecksum(false);
-    meta.setIncludesMvcc(includesMemstoreTS);
-    meta.setIncludesTags(includesTag);
-    meta.setCompressAlgo(algo);
+    HFileContext meta = new HFileContextBuilder()
+                        .withHBaseCheckSum(false)
+                        .withIncludesMvcc(includesMemstoreTS)
+                        .withIncludesTags(includesTag)
+                        .withCompressionAlgo(algo)
+                        .build();
     HFileBlock.FSReaderV2 hbr = new HFileBlock.FSReaderV2(new FSDataInputStreamWrapper(is),
         totalSize, fs, path, meta);
     hbr.setDataBlockEncoder(dataBlockEncoder);
@@ -420,12 +421,12 @@ public class TestHFileBlockCompatibility {
       this.dataBlockEncoder = dataBlockEncoder != null
          ? dataBlockEncoder : NoOpDataBlockEncoder.INSTANCE;
 
-      meta = new HFileContext();
-      meta.setUsesHBaseChecksum(false);
-      meta.setIncludesMvcc(includesMemstoreTS);
-      meta.setIncludesTags(includesTag);
-      meta.setCompressAlgo(compressionAlgorithm);
+      meta = new HFileContextBuilder()
+             .withHBaseCheckSum(false)
+             .withIncludesMvcc(includesMemstoreTS)
+             .withIncludesTags(includesTag)
+             .withCompressionAlgo(compressionAlgorithm)
+             .build();
       defaultBlockEncodingCtx = new HFileBlockDefaultEncodingContext(null, DUMMY_HEADER, meta);
       dataBlockEncodingCtx =
           this.dataBlockEncoder.newOnDiskDataBlockEncodingContext(
@@ -730,9 +731,11 @@ public class TestHFileBlockCompatibility {
      * Creates a new HFileBlock.
      */
     public HFileBlock getBlockForCaching() {
-      meta.setUsesHBaseChecksum(false);
-      meta.setChecksumType(ChecksumType.NULL);
-      meta.setBytesPerChecksum(0);
+      HFileContext meta = new HFileContextBuilder()
+                          .withHBaseCheckSum(false)
+                          .withChecksumType(ChecksumType.NULL)
+                          .withBytesPerCheckSum(0)
+                          .build();
       return new HFileBlock(blockType, getOnDiskSizeWithoutHeader(),
           getUncompressedSizeWithoutHeader(), prevOffset,
           getUncompressedBufferWithHeader(), DONT_FILL_HEADER, startOffset,

@@ -188,11 +188,12 @@ public class TestHFileBlockIndex {
     LOG.info("Size of " + path + ": " + fileSize);
 
     FSDataInputStream istream = fs.open(path);
-    HFileContext meta = new HFileContext();
-    meta.setUsesHBaseChecksum(true);
-    meta.setIncludesMvcc(includesMemstoreTS);
-    meta.setIncludesTags(useTags);
-    meta.setCompressAlgo(compr);
+    HFileContext meta = new HFileContextBuilder()
+                        .withHBaseCheckSum(true)
+                        .withIncludesMvcc(includesMemstoreTS)
+                        .withIncludesTags(useTags)
+                        .withCompressionAlgo(compr)
+                        .build();
     HFileBlock.FSReader blockReader = new HFileBlock.FSReaderV2(istream, fs.getFileStatus(path)
         .getLen(), meta);
 
@@ -241,13 +242,14 @@ public class TestHFileBlockIndex {
 
   private void writeWholeIndex(boolean useTags) throws IOException {
     assertEquals(0, keys.size());
-    HFileContext meta = new HFileContext();
-    meta.setUsesHBaseChecksum(true);
-    meta.setIncludesMvcc(includesMemstoreTS);
-    meta.setIncludesTags(useTags);
-    meta.setCompressAlgo(compr);
-    meta.setChecksumType(HFile.DEFAULT_CHECKSUM_TYPE);
-    meta.setBytesPerChecksum(HFile.DEFAULT_BYTES_PER_CHECKSUM);
+    HFileContext meta = new HFileContextBuilder()
+                        .withHBaseCheckSum(true)
+                        .withIncludesMvcc(includesMemstoreTS)
+                        .withIncludesTags(useTags)
+                        .withCompressionAlgo(compr)
+                        .withChecksumType(HFile.DEFAULT_CHECKSUM_TYPE)
+                        .withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM)
+                        .build();
     HFileBlock.Writer hbw = new HFileBlock.Writer(null,
         meta);
     FSDataOutputStream outputStream = fs.create(path);
@@ -516,9 +518,10 @@ public class TestHFileBlockIndex {
 
     // Write the HFile
     {
-      HFileContext meta = new HFileContext();
-      meta.setBlocksize(SMALL_BLOCK_SIZE);
-      meta.setCompressAlgo(compr);
+      HFileContext meta = new HFileContextBuilder()
+                          .withBlockSize(SMALL_BLOCK_SIZE)
+                          .withCompressionAlgo(compr)
+                          .build();
       HFile.Writer writer =
           HFile.getWriterFactory(conf, cacheConf)
               .withPath(fs, hfilePath)

@@ -30,7 +30,6 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.SmallTests;
 import org.apache.hadoop.hbase.io.HeapSize;
-import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext;
@@ -127,13 +126,12 @@ public class TestHFileDataBlockEncoder {
     buf.position(headerSize);
     keyValues.rewind();
     buf.put(keyValues);
-    HFileContext meta = new HFileContext();
-    meta.setUsesHBaseChecksum(false);
-    meta.setIncludesMvcc(includesMemstoreTS);
-    meta.setIncludesTags(useTags);
-    meta.setCompressAlgo(Compression.Algorithm.NONE);
-    meta.setBlocksize(0);
-    meta.setChecksumType(ChecksumType.NULL);
+    HFileContext meta = new HFileContextBuilder().withHBaseCheckSum(false)
+                        .withIncludesMvcc(includesMemstoreTS)
+                        .withIncludesTags(useTags)
+                        .withBlockSize(0)
+                        .withChecksumType(ChecksumType.NULL)
+                        .build();
     HFileBlock block = new HFileBlock(BlockType.DATA, size, size, -1, buf,
         HFileBlock.FILL_HEADER, 0,
         0, meta);
@@ -203,13 +201,14 @@ public class TestHFileDataBlockEncoder {
     buf.position(HConstants.HFILEBLOCK_HEADER_SIZE);
     keyValues.rewind();
     buf.put(keyValues);
-    HFileContext meta = new HFileContext();
-    meta.setIncludesMvcc(includesMemstoreTS);
-    meta.setIncludesTags(useTag);
-    meta.setUsesHBaseChecksum(true);
-    meta.setCompressAlgo(Algorithm.NONE);
-    meta.setBlocksize(0);
-    meta.setChecksumType(ChecksumType.NULL);
+    HFileContext meta = new HFileContextBuilder()
+                        .withIncludesMvcc(includesMemstoreTS)
+                        .withIncludesTags(useTag)
+                        .withHBaseCheckSum(true)
+                        .withCompressionAlgo(Algorithm.NONE)
+                        .withBlockSize(0)
+                        .withChecksumType(ChecksumType.NULL)
+                        .build();
     HFileBlock b = new HFileBlock(BlockType.DATA, size, size, -1, buf,
         HFileBlock.FILL_HEADER, 0,
         0, meta);

@@ -23,10 +23,10 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.junit.experimental.categories.Category;
 import org.apache.hadoop.hbase.SmallTests;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.Test;
+import org.junit.experimental.categories.Category;
 
 /**
  * Test a case when an inline index chunk is converted to a root one. This reproduces the bug in
@@ -52,8 +52,7 @@ public class TestHFileInlineToRootChunkConversion {
     FileSystem fs = FileSystem.get(conf);
     CacheConfig cacheConf = new CacheConfig(conf);
     conf.setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY, maxChunkSize);
-    HFileContext context = new HFileContext();
-    context.setBlocksize(16);
+    HFileContext context = new HFileContextBuilder().withBlockSize(16).build();
     HFileWriterV2 hfw =
         (HFileWriterV2) new HFileWriterV2.WriterFactoryV2(conf, cacheConf)
             .withFileContext(context)

@@ -161,9 +161,9 @@ public class TestHFilePerformance extends TestCase {
     FSDataOutputStream fout = createFSOutput(path);
 
     if ("HFile".equals(fileType)){
-      HFileContext meta = new HFileContext();
-      meta.setCompressAlgo(AbstractHFileWriter.compressionByName(codecName));
-      meta.setBlocksize(minBlockSize);
+      HFileContext meta = new HFileContextBuilder()
+                          .withCompressionAlgo(AbstractHFileWriter.compressionByName(codecName))
+                          .withBlockSize(minBlockSize).build();
       System.out.println("HFile write method: ");
       HFile.Writer writer = HFile.getWriterFactoryNoCache(conf)
           .withOutputStream(fout)

@@ -127,9 +127,10 @@ public class TestHFileSeek extends TestCase {
     long totalBytes = 0;
     FSDataOutputStream fout = createFSOutput(path, fs);
     try {
-      HFileContext context = new HFileContext();
-      context.setBlocksize(options.minBlockSize);
-      context.setCompressAlgo(AbstractHFileWriter.compressionByName(options.compress));
+      HFileContext context = new HFileContextBuilder()
+                             .withBlockSize(options.minBlockSize)
+                             .withCompressionAlgo(AbstractHFileWriter.compressionByName(options.compress))
+                             .build();
       Writer writer = HFile.getWriterFactoryNoCache(conf)
           .withOutputStream(fout)
           .withFileContext(context)

@@ -43,7 +43,6 @@ import org.apache.hadoop.hbase.KeyValue.KVComparator;
 import org.apache.hadoop.hbase.SmallTests;
-import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
 import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Writables;
@@ -94,9 +93,10 @@ public class TestHFileWriterV2 {
   private void writeDataAndReadFromHFile(Path hfilePath,
       Algorithm compressAlgo, int entryCount, boolean findMidKey) throws IOException {
 
-    HFileContext context = new HFileContext();
-    context.setBlocksize(4096);
-    context.setCompressAlgo(compressAlgo);
+    HFileContext context = new HFileContextBuilder()
+                           .withBlockSize(4096)
+                           .withCompressionAlgo(compressAlgo)
+                           .build();
     HFileWriterV2 writer = (HFileWriterV2)
         new HFileWriterV2.WriterFactoryV2(conf, new CacheConfig(conf))
             .withPath(fs, hfilePath)
@@ -137,11 +137,12 @@ public class TestHFileWriterV2 {
     assertEquals(2, trailer.getMajorVersion());
     assertEquals(entryCount, trailer.getEntryCount());
 
-    HFileContext meta = new HFileContext();
-    meta.setUsesHBaseChecksum(true);
-    meta.setIncludesMvcc(false);
-    meta.setIncludesTags(false);
-    meta.setCompressAlgo(compressAlgo);
+    HFileContext meta = new HFileContextBuilder()
+                        .withHBaseCheckSum(true)
+                        .withIncludesMvcc(false)
+                        .withIncludesTags(false)
+                        .withCompressionAlgo(compressAlgo)
+                        .build();
 
     HFileBlock.FSReader blockReader = new HFileBlock.FSReaderV2(fsdis, fileSize, meta);
     // Comparator class name is stored in the trailer in version 2.

@@ -115,9 +115,9 @@ public class TestHFileWriterV3 {
 
   private void writeDataAndReadFromHFile(Path hfilePath,
       Algorithm compressAlgo, int entryCount, boolean findMidKey, boolean useTags) throws IOException {
-    HFileContext context = new HFileContext();
-    context.setBlocksize(4096);
-    context.setCompressAlgo(compressAlgo);
+    HFileContext context = new HFileContextBuilder()
+                           .withBlockSize(4096)
+                           .withCompressionAlgo(compressAlgo).build();
     HFileWriterV3 writer = (HFileWriterV3)
         new HFileWriterV3.WriterFactoryV3(conf, new CacheConfig(conf))
             .withPath(fs, hfilePath)
@@ -168,11 +168,11 @@ public class TestHFileWriterV3 {
 
     assertEquals(3, trailer.getMajorVersion());
     assertEquals(entryCount, trailer.getEntryCount());
-    HFileContext meta = new HFileContext();
-    meta.setCompressAlgo(compressAlgo);
-    meta.setIncludesMvcc(false);
-    meta.setIncludesTags(useTags);
-    meta.setUsesHBaseChecksum(true);
+    HFileContext meta = new HFileContextBuilder()
+                        .withCompressionAlgo(compressAlgo)
+                        .withIncludesMvcc(false)
+                        .withIncludesTags(useTags)
+                        .withHBaseCheckSum(true).build();
     HFileBlock.FSReader blockReader =
         new HFileBlock.FSReaderV2(fsdis, fileSize, meta);
     // Comparator class name is stored in the trailer in version 2.

@@ -57,8 +57,7 @@ public class TestReseekTo {
       TEST_UTIL.getConfiguration().setInt("hfile.format.version", 3);
     }
     CacheConfig cacheConf = new CacheConfig(TEST_UTIL.getConfiguration());
-    HFileContext context = new HFileContext();
-    context.setBlocksize(4000);
+    HFileContext context = new HFileContextBuilder().withBlockSize(4000).build();
     HFile.Writer writer = HFile.getWriterFactory(
         TEST_UTIL.getConfiguration(), cacheConf)
             .withOutputStream(fout)

@@ -22,9 +22,12 @@ import java.io.IOException;
 
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.HBaseTestCase;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.SmallTests;
+import org.apache.hadoop.hbase.Tag;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.io.RawComparator;
 import org.junit.experimental.categories.Category;
 
 /**
@@ -73,8 +76,7 @@ public class TestSeekTo extends HBaseTestCase {
     }
     FSDataOutputStream fout = this.fs.create(ncTFile);
     int blocksize = toKV("a", tagUsage).getLength() * 3;
-    HFileContext context = new HFileContext();
-    context.setBlocksize(blocksize);
+    HFileContext context = new HFileContextBuilder().withBlockSize(blocksize).build();
     HFile.Writer writer = HFile.getWriterFactoryNoCache(conf).withOutputStream(fout)
         .withFileContext(context)
         // NOTE: This test is dependent on this deprecated nonstandard

@@ -18,10 +18,22 @@
  */
 package org.apache.hadoop.hbase.mapreduce;
 
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.TreeMap;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.LargeTests;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.io.compress.Compression;
@@ -29,6 +41,7 @@ import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
+import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
 import org.apache.hadoop.hbase.regionserver.BloomType;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
@@ -38,11 +51,6 @@ import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-import java.io.IOException;
-import java.util.TreeMap;
-
-import static org.junit.Assert.*;
-
 /**
  * Test cases for the "load" half of the HFileOutputFormat bulk load
  * functionality. These tests run faster than the full MR cluster
@@ -262,9 +270,10 @@ public class TestLoadIncrementalHFiles {
       byte[] family, byte[] qualifier,
       byte[] startKey, byte[] endKey, int numRows) throws IOException
   {
-    HFileContext meta = new HFileContext();
-    meta.setBlocksize(BLOCKSIZE);
-    meta.setCompressAlgo(COMPRESSION);
+    HFileContext meta = new HFileContextBuilder()
+                        .withBlockSize(BLOCKSIZE)
+                        .withCompressionAlgo(COMPRESSION)
+                        .build();
     HFile.Writer writer = HFile.getWriterFactory(configuration, new CacheConfig(configuration))
         .withPath(fs, path)
         .withFileContext(meta)

@@ -38,9 +38,9 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileBlockIndex;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
+import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
 import org.apache.hadoop.hbase.util.BloomFilterFactory;
 import org.apache.hadoop.io.BytesWritable;
 
@@ -183,9 +183,8 @@ public class CreateRandomStoreFile {
           Integer.valueOf(cmdLine.getOptionValue(INDEX_BLOCK_SIZE_OPTION)));
     }
 
-    HFileContext meta = new HFileContext();
-    meta.setCompressAlgo(compr);
-    meta.setBlocksize(blockSize);
+    HFileContext meta = new HFileContextBuilder().withCompressionAlgo(compr)
+                        .withBlockSize(blockSize).build();
     StoreFile.Writer sfw = new StoreFile.WriterBuilder(conf,
         new CacheConfig(conf), fs)
             .withOutputDir(outputDir)

@@ -45,9 +45,10 @@ import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoder;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.io.encoding.EncodedDataBlock;
-import org.apache.hadoop.hbase.io.hfile.HFileContext;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFileBlock;
+import org.apache.hadoop.hbase.io.hfile.HFileContext;
+import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
 import org.apache.hadoop.hbase.io.hfile.HFileReaderV2;
 import org.apache.hadoop.hbase.io.hfile.NoOpDataBlockEncoder;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -214,10 +215,10 @@ public class DataBlockEncodingTool {
         continue;
       }
       DataBlockEncoder d = encoding.getEncoder();
-      HFileContext meta = new HFileContext();
-      meta.setCompressAlgo(Compression.Algorithm.NONE);
-      meta.setIncludesMvcc(includesMemstoreTS);
-      meta.setIncludesTags(useTag);
+      HFileContext meta = new HFileContextBuilder()
+                          .withCompressionAlgo(Compression.Algorithm.NONE)
+                          .withIncludesMvcc(includesMemstoreTS)
+                          .withIncludesTags(useTag).build();
       codecs.add(new EncodedDataBlock(d, encoding, rawKVs, meta));
     }
   }

@@ -36,13 +36,16 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.MediumTests;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.hfile.BlockCache;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
-import org.apache.hadoop.hbase.io.hfile.HFileContext;
-import org.apache.hadoop.hbase.io.hfile.NoOpDataBlockEncoder;
 import org.apache.hadoop.hbase.io.hfile.HFile;
+import org.apache.hadoop.hbase.io.hfile.HFileContext;
+import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
+import org.apache.hadoop.hbase.io.hfile.NoOpDataBlockEncoder;
 import org.apache.hadoop.hbase.io.hfile.TestHFileWriterV2;
 import org.apache.hadoop.hbase.util.BloomFilterFactory;
 import org.apache.hadoop.hbase.util.ByteBloomFilter;
@@ -292,8 +295,7 @@ public class TestCompoundBloomFilter {
         BLOOM_BLOCK_SIZES[t]);
     conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, true);
     cacheConf = new CacheConfig(conf);
-    HFileContext meta = new HFileContext();
-    meta.setBlocksize(BLOCK_SIZES[t]);
+    HFileContext meta = new HFileContextBuilder().withBlockSize(BLOCK_SIZES[t]).build();
     StoreFile.Writer w = new StoreFile.WriterBuilder(conf, cacheConf, fs)
         .withOutputDir(TEST_UTIL.getDataTestDir())
         .withBloomType(bt)

@@ -37,12 +37,19 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FilterFileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PositionedReadable;
-import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.MediumTests;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.fs.HFileSystem;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
+import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
 import org.apache.hadoop.hbase.io.hfile.NoOpDataBlockEncoder;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -73,8 +80,7 @@ public class TestFSErrorsExposed {
     FaultyFileSystem faultyfs = new FaultyFileSystem(hfs.getBackingFs());
     FileSystem fs = new HFileSystem(faultyfs);
     CacheConfig cacheConf = new CacheConfig(util.getConfiguration());
-    HFileContext meta = new HFileContext();
-    meta.setBlocksize(2*1024);
+    HFileContext meta = new HFileContextBuilder().withBlockSize(2 * 1024).build();
     StoreFile.Writer writer = new StoreFile.WriterBuilder(
         util.getConfiguration(), cacheConf, hfs)
             .withOutputDir(hfilePath)
@@ -125,8 +131,7 @@ public class TestFSErrorsExposed {
     FaultyFileSystem faultyfs = new FaultyFileSystem(hfs.getBackingFs());
     HFileSystem fs = new HFileSystem(faultyfs);
     CacheConfig cacheConf = new CacheConfig(util.getConfiguration());
-    HFileContext meta = new HFileContext();
-    meta.setBlocksize(2 * 1024);
+    HFileContext meta = new HFileContextBuilder().withBlockSize(2 * 1024).build();
     StoreFile.Writer writer = new StoreFile.WriterBuilder(
         util.getConfiguration(), cacheConf, hfs)
             .withOutputDir(hfilePath)

@@ -27,9 +27,16 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.LargeTests;
 import org.apache.hadoop.hbase.MultithreadedTestUtil.RepeatingTestThread;
 import org.apache.hadoop.hbase.MultithreadedTestUtil.TestContext;
+import org.apache.hadoop.hbase.TableExistsException;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.HConnection;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.RegionServerCallable;
@@ -38,12 +45,12 @@ import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.RpcRetryingCaller;
 import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory;
 import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.TableExistsException;
 import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
+import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
 import org.apache.hadoop.hbase.protobuf.RequestConverter;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest;
@@ -51,9 +58,9 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
 import org.junit.Test;
-import org.junit.experimental.categories.Category;
 
 import com.google.common.collect.Lists;
+import org.junit.experimental.categories.Category;
 
 /**
  * Tests bulk loading of HFiles and shows the atomicity or lack of atomicity of
|
@ -89,9 +96,9 @@ public class TestHRegionServerBulkLoad {
|
|||
*/
|
||||
public static void createHFile(FileSystem fs, Path path, byte[] family,
|
||||
byte[] qualifier, byte[] value, int numRows) throws IOException {
|
||||
HFileContext context = new HFileContext();
|
||||
context.setBlocksize(BLOCKSIZE);
|
||||
context.setCompressAlgo(COMPRESSION);
|
||||
HFileContext context = new HFileContextBuilder().withBlockSize(BLOCKSIZE)
|
||||
.withCompressionAlgo(COMPRESSION)
|
||||
.build();
|
||||
HFile.Writer writer = HFile
|
||||
.getWriterFactory(conf, new CacheConfig(conf))
|
||||
.withPath(fs, path)
|
||||
|
|
|
@@ -60,6 +60,7 @@ import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
+import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
@@ -321,8 +322,7 @@ public class TestStore extends TestCase {
     long seqid = f.getMaxSequenceId();
     Configuration c = HBaseConfiguration.create();
     FileSystem fs = FileSystem.get(c);
-    HFileContext meta = new HFileContext();
-    meta.setBlocksize(StoreFile.DEFAULT_BLOCKSIZE_SMALL);
+    HFileContext meta = new HFileContextBuilder().withBlockSize(StoreFile.DEFAULT_BLOCKSIZE_SMALL).build();
     StoreFile.Writer w = new StoreFile.WriterBuilder(c, new CacheConfig(c),
         fs)
             .withOutputDir(storedir)

@@ -47,6 +47,7 @@ import org.apache.hadoop.hbase.io.hfile.BlockCache;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.CacheStats;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
+import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
 import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
 import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
@@ -96,8 +97,7 @@ public class TestStoreFile extends HBaseTestCase {
     HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(
         conf, fs, new Path(this.testDir, hri.getTable().getNameAsString()), hri);
 
-    HFileContext meta = new HFileContext();
-    meta.setBlocksize(2 * 1024);
+    HFileContext meta = new HFileContextBuilder().withBlockSize(2*1024).build();
     StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs)
         .withFilePath(regionFs.createTempName())
         .withFileContext(meta)
@@ -148,8 +148,7 @@ public class TestStoreFile extends HBaseTestCase {
     HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(
         conf, fs, new Path(this.testDir, hri.getTable().getNameAsString()), hri);
 
-    HFileContext meta = new HFileContext();
-    meta.setBlocksize(8 * 1024);
+    HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
     // Make a store file and write data to it.
     StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs)
         .withFilePath(regionFs.createTempName())
@@ -194,8 +193,7 @@ public class TestStoreFile extends HBaseTestCase {
     FSUtils.setRootDir(testConf, this.testDir);
     HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(
         testConf, fs, FSUtils.getTableDir(this.testDir, hri.getTable()), hri);
-    HFileContext meta = new HFileContext();
-    meta.setBlocksize(8 * 1024);
+    HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
 
     // Make a store file and write data to it.
     StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs)
@@ -240,8 +238,7 @@ public class TestStoreFile extends HBaseTestCase {
     HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(
         testConf, fs, FSUtils.getTableDir(this.testDir, hri.getTable()), hri);
 
-    HFileContext meta = new HFileContext();
-    meta.setBlocksize(8 * 1024);
+    HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
     // Make a store file and write data to it. <root>/<tablename>/<rgn>/<cf>/<file>
     StoreFile.Writer writer = new StoreFile.WriterBuilder(testConf, cacheConf, this.fs)
         .withFilePath(regionFs.createTempName())
@@ -503,10 +500,9 @@ public class TestStoreFile extends HBaseTestCase {
 
     // write the file
     Path f = new Path(ROOT_DIR, getName());
-    HFileContext meta = new HFileContext();
-    meta.setBlocksize(StoreFile.DEFAULT_BLOCKSIZE_SMALL);
-    meta.setChecksumType(CKTYPE);
-    meta.setBytesPerChecksum(CKBYTES);
+    HFileContext meta = new HFileContextBuilder().withBlockSize(StoreFile.DEFAULT_BLOCKSIZE_SMALL)
+                        .withChecksumType(CKTYPE)
+                        .withBytesPerCheckSum(CKBYTES).build();
     // Make a store file and write data to it.
     StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs)
         .withFilePath(f)
@@ -526,10 +522,10 @@ public class TestStoreFile extends HBaseTestCase {
     // write the file
     Path f = new Path(ROOT_DIR, getName());
 
-    HFileContext meta = new HFileContext();
-    meta.setBlocksize(StoreFile.DEFAULT_BLOCKSIZE_SMALL);
-    meta.setChecksumType(CKTYPE);
-    meta.setBytesPerChecksum(CKBYTES);
+    HFileContext meta = new HFileContextBuilder()
+                        .withBlockSize(StoreFile.DEFAULT_BLOCKSIZE_SMALL)
+                        .withChecksumType(CKTYPE)
+                        .withBytesPerCheckSum(CKBYTES).build();
     // Make a store file and write data to it.
     StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs)
         .withFilePath(f)
@@ -582,8 +578,7 @@ public class TestStoreFile extends HBaseTestCase {
   public void testReseek() throws Exception {
     // write the file
     Path f = new Path(ROOT_DIR, getName());
-    HFileContext meta = new HFileContext();
-    meta.setBlocksize(8 * 1024);
+    HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
     // Make a store file and write data to it.
     StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs)
         .withFilePath(f)
@@ -626,10 +621,9 @@ public class TestStoreFile extends HBaseTestCase {
     for (int x : new int[]{0,1}) {
       // write the file
      Path f = new Path(ROOT_DIR, getName() + x);
-      HFileContext meta = new HFileContext();
-      meta.setBlocksize(StoreFile.DEFAULT_BLOCKSIZE_SMALL);
-      meta.setChecksumType(CKTYPE);
-      meta.setBytesPerChecksum(CKBYTES);
+      HFileContext meta = new HFileContextBuilder().withBlockSize(StoreFile.DEFAULT_BLOCKSIZE_SMALL)
+                          .withChecksumType(CKTYPE)
+                          .withBytesPerCheckSum(CKBYTES).build();
       // Make a store file and write data to it.
       StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs)
           .withFilePath(f)
@@ -782,8 +776,7 @@ public class TestStoreFile extends HBaseTestCase {
     // Make up a directory hierarchy that has a regiondir ("7e0102") and familyname.
     Path storedir = new Path(new Path(this.testDir, "7e0102"), "familyname");
     Path dir = new Path(storedir, "1234567890");
-    HFileContext meta = new HFileContext();
-    meta.setBlocksize(8 * 1024);
+    HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
     // Make a store file and write data to it.
     StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs)
         .withOutputDir(dir)
@@ -969,10 +962,10 @@ public class TestStoreFile extends HBaseTestCase {
       totalSize += kv.getLength() + 1;
     }
     int blockSize = totalSize / numBlocks;
-    HFileContext meta = new HFileContext();
-    meta.setBlocksize(blockSize);
-    meta.setChecksumType(CKTYPE);
-    meta.setBytesPerChecksum(CKBYTES);
+    HFileContext meta = new HFileContextBuilder().withBlockSize(blockSize)
+                        .withChecksumType(CKTYPE)
+                        .withBytesPerCheckSum(CKBYTES)
+                        .build();
     // Make a store file and write data to it.
     StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs)
         .withFilePath(path)
@@ -1005,12 +998,12 @@ public class TestStoreFile extends HBaseTestCase {
         dataBlockEncoderAlgo,
         dataBlockEncoderAlgo);
     cacheConf = new CacheConfig(conf);
-    HFileContext meta = new HFileContext();
-    meta.setBlocksize(HConstants.DEFAULT_BLOCKSIZE);
-    meta.setChecksumType(CKTYPE);
-    meta.setBytesPerChecksum(CKBYTES);
-    meta.setEncodingOnDisk(dataBlockEncoderAlgo);
-    meta.setEncodingInCache(dataBlockEncoderAlgo);
+    HFileContext meta = new HFileContextBuilder().withBlockSize(StoreFile.DEFAULT_BLOCKSIZE_SMALL)
+                        .withChecksumType(CKTYPE)
+                        .withBytesPerCheckSum(CKBYTES)
+                        .withDataBlockEncodingInCache(dataBlockEncoderAlgo)
+                        .withDataBlockEncodingOnDisk(dataBlockEncoderAlgo)
+                        .build();
     // Make a store file and write data to it.
     StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs)
         .withFilePath(path)

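Where a test also tunes data block encoding, those settings map onto the builder the same way, as the TestStoreFile hunk above shows: `setEncodingOnDisk()`/`setEncodingInCache()` become `withDataBlockEncodingOnDisk()`/`withDataBlockEncodingInCache()`. A hedged, self-contained sketch of that variant (the block size and PREFIX encoding here are illustrative choices, not values from the commit):

import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.HFileContext;
import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;

public class EncodingContextSketch {
  public static void main(String[] args) {
    // Encoding is now fixed at build time on the immutable context,
    // rather than mutated on a shared HFileContext instance.
    HFileContext meta = new HFileContextBuilder()
        .withBlockSize(64 * 1024)                              // illustrative
        .withDataBlockEncodingOnDisk(DataBlockEncoding.PREFIX) // illustrative
        .withDataBlockEncodingInCache(DataBlockEncoding.PREFIX)
        .build();
    System.out.println(meta.getBlocksize());
  }
}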
@@ -60,6 +60,7 @@ import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
+import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine;
@@ -327,7 +328,7 @@ public class TestWALReplay {
     HLog wal = createWAL(this.conf);
     HRegion region = HRegion.openHRegion(hri, htd, wal, this.conf);
     Path f = new Path(basedir, "hfile");
-    HFileContext context = new HFileContext();
+    HFileContext context = new HFileContextBuilder().build();
     HFile.Writer writer =
         HFile.getWriterFactoryNoCache(conf).withPath(fs, f)
             .withFileContext(context).create();

@@ -68,6 +68,7 @@ import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessorEnvironment;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
+import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
 import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
@@ -843,7 +844,7 @@ public class TestAccessController extends SecureTestUtil {
     HFile.Writer writer = null;
     long now = System.currentTimeMillis();
     try {
-      HFileContext context = new HFileContext();
+      HFileContext context = new HFileContextBuilder().build();
       writer = HFile.getWriterFactory(conf, new CacheConfig(conf))
           .withPath(fs, path)
          .withFileContext(context)