From 7dd240c8adb98376b38f89c4135d919f7bd97b1d Mon Sep 17 00:00:00 2001 From: ramkrishna Date: Wed, 9 Oct 2013 11:21:03 +0000 Subject: [PATCH] HBASE-9546-HFileContext should adopt Builder pattern (Ram) git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1530567 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop/hbase/io/hfile/HFileContext.java | 68 +++++----- .../hbase/io/hfile/HFileContextBuilder.java | 107 ++++++++++++++++ .../hadoop/hbase/io/hfile/HFileBlock.java | 28 ++-- .../io/hfile/HFileDataBlockEncoderImpl.java | 8 +- .../hadoop/hbase/io/hfile/HFileReaderV2.java | 12 +- .../hadoop/hbase/io/hfile/HFileReaderV3.java | 15 ++- .../hadoop/hbase/io/hfile/HFileWriterV2.java | 6 - .../hadoop/hbase/io/hfile/HFileWriterV3.java | 3 +- .../hbase/mapreduce/HFileOutputFormat.java | 24 ++-- .../mapreduce/LoadIncrementalHFiles.java | 19 +-- .../hadoop/hbase/regionserver/HStore.java | 21 +-- .../hadoop/hbase/util/CompressionTest.java | 9 +- .../hbase/HFilePerformanceEvaluation.java | 5 +- .../TestRegionObserverInterface.java | 11 +- .../hbase/io/TestHalfStoreFileReader.java | 8 +- .../io/encoding/TestDataBlockEncoders.java | 45 ++++--- .../io/encoding/TestPrefixTreeEncoding.java | 44 ++++--- .../hadoop/hbase/io/hfile/CacheTestUtils.java | 16 +-- .../hbase/io/hfile/TestCacheOnWrite.java | 11 +- .../hadoop/hbase/io/hfile/TestChecksum.java | 65 +++++----- .../hadoop/hbase/io/hfile/TestHFile.java | 23 ++-- .../hadoop/hbase/io/hfile/TestHFileBlock.java | 120 +++++++++--------- .../io/hfile/TestHFileBlockCompatibility.java | 45 ++++--- .../hbase/io/hfile/TestHFileBlockIndex.java | 33 ++--- .../io/hfile/TestHFileDataBlockEncoder.java | 29 ++--- .../TestHFileInlineToRootChunkConversion.java | 7 +- .../hbase/io/hfile/TestHFilePerformance.java | 6 +- .../hadoop/hbase/io/hfile/TestHFileSeek.java | 7 +- .../hbase/io/hfile/TestHFileWriterV2.java | 19 +-- .../hbase/io/hfile/TestHFileWriterV3.java | 16 +-- .../hadoop/hbase/io/hfile/TestReseekTo.java | 3 +- .../hadoop/hbase/io/hfile/TestSeekTo.java | 10 +- .../mapreduce/TestLoadIncrementalHFiles.java | 27 ++-- .../regionserver/CreateRandomStoreFile.java | 7 +- .../regionserver/DataBlockEncodingTool.java | 11 +- .../regionserver/TestCompoundBloomFilter.java | 12 +- .../regionserver/TestFSErrorsExposed.java | 15 ++- .../TestHRegionServerBulkLoad.java | 19 ++- .../hadoop/hbase/regionserver/TestStore.java | 4 +- .../hbase/regionserver/TestStoreFile.java | 61 ++++----- .../hbase/regionserver/wal/TestWALReplay.java | 3 +- .../security/access/TestAccessController.java | 3 +- 42 files changed, 577 insertions(+), 428 deletions(-) create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContextBuilder.java diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContext.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContext.java index 04fb5a531a9..51f7afb43fd 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContext.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContext.java @@ -59,27 +59,52 @@ public class HFileContext implements HeapSize, Cloneable { //Empty constructor. 
Go with setters public HFileContext() { } + /** + * Copy constructor + * @param context + */ + public HFileContext(HFileContext context) { + this.usesHBaseChecksum = context.usesHBaseChecksum; + this.includesMvcc = context.includesMvcc; + this.includesTags = context.includesTags; + this.compressAlgo = context.compressAlgo; + this.compressTags = context.compressTags; + this.checksumType = context.checksumType; + this.bytesPerChecksum = context.bytesPerChecksum; + this.blocksize = context.blocksize; + this.encodingOnDisk = context.encodingOnDisk; + this.encodingInCache = context.encodingInCache; + } + + public HFileContext(boolean useHBaseChecksum, boolean includesMvcc, boolean includesTags, + Algorithm compressAlgo, boolean compressTags, ChecksumType checksumType, + int bytesPerChecksum, int blockSize, DataBlockEncoding encodingOnDisk, + DataBlockEncoding encodingInCache) { + this.usesHBaseChecksum = useHBaseChecksum; + this.includesMvcc = includesMvcc; + this.includesTags = includesTags; + this.compressAlgo = compressAlgo; + this.compressTags = compressTags; + this.checksumType = checksumType; + this.bytesPerChecksum = bytesPerChecksum; + this.blocksize = blockSize; + this.encodingOnDisk = encodingOnDisk; + this.encodingInCache = encodingInCache; + } public Algorithm getCompression() { return compressAlgo; } - public void setCompressAlgo(Algorithm compressAlgo) { - this.compressAlgo = compressAlgo; - } - public boolean shouldUseHBaseChecksum() { return usesHBaseChecksum; } - public void setUsesHBaseChecksum(boolean usesHBaseChecksum) { - this.usesHBaseChecksum = usesHBaseChecksum; - } - public boolean shouldIncludeMvcc() { return includesMvcc; } + // TODO : This setter should be removed public void setIncludesMvcc(boolean includesMvcc) { this.includesMvcc = includesMvcc; } @@ -88,6 +113,7 @@ public class HFileContext implements HeapSize, Cloneable { return includesTags; } + // TODO : This setter should be removed? 
public void setIncludesTags(boolean includesTags) { this.includesTags = includesTags; } @@ -96,50 +122,26 @@ public class HFileContext implements HeapSize, Cloneable { return compressTags; } - public void setCompressTags(boolean compressTags) { - this.compressTags = compressTags; - } - public ChecksumType getChecksumType() { return checksumType; } - public void setChecksumType(ChecksumType checksumType) { - this.checksumType = checksumType; - } - public int getBytesPerChecksum() { return bytesPerChecksum; } - public void setBytesPerChecksum(int bytesPerChecksum) { - this.bytesPerChecksum = bytesPerChecksum; - } - public int getBlocksize() { return blocksize; } - public void setBlocksize(int blocksize) { - this.blocksize = blocksize; - } - public DataBlockEncoding getEncodingOnDisk() { return encodingOnDisk; } - public void setEncodingOnDisk(DataBlockEncoding encodingOnDisk) { - this.encodingOnDisk = encodingOnDisk; - } - public DataBlockEncoding getEncodingInCache() { return encodingInCache; } - public void setEncodingInCache(DataBlockEncoding encodingInCache) { - this.encodingInCache = encodingInCache; - } - /** * HeapSize implementation * NOTE : The heapsize should be altered as and when new state variable are added @@ -171,4 +173,4 @@ public class HFileContext implements HeapSize, Cloneable { clonnedCtx.encodingInCache = this.encodingInCache; return clonnedCtx; } -} \ No newline at end of file +} diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContextBuilder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContextBuilder.java new file mode 100644 index 00000000000..dcd3e85a7e2 --- /dev/null +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContextBuilder.java @@ -0,0 +1,107 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.io.hfile; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.io.compress.Compression.Algorithm; +import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; +import org.apache.hadoop.hbase.util.ChecksumType; +/** + * A builder that helps in building up the HFileContext + */ +@InterfaceAudience.Private +public class HFileContextBuilder { + + public static final int DEFAULT_BYTES_PER_CHECKSUM = 16 * 1024; + public static final ChecksumType DEFAULT_CHECKSUM_TYPE = ChecksumType.CRC32; + + /** Whether checksum is enabled or not **/ + private boolean usesHBaseChecksum = true; + /** Whether mvcc is to be included in the Read/Write **/ + private boolean includesMvcc = true; + /** Whether tags are to be included in the Read/Write **/ + private boolean includesTags; + /** Compression algorithm used **/ + private Algorithm compressAlgo = Algorithm.NONE; + /** Whether tags to be compressed or not **/ + private boolean compressTags; + /** the checksum type **/ + private ChecksumType checksumType = DEFAULT_CHECKSUM_TYPE; + /** the number of bytes per checksum value **/ + private int bytesPerChecksum = DEFAULT_BYTES_PER_CHECKSUM; + /** Number of uncompressed bytes we allow per block. */ + private int blocksize = HConstants.DEFAULT_BLOCKSIZE; + private DataBlockEncoding encodingOnDisk = DataBlockEncoding.NONE; + private DataBlockEncoding encodingInCache = DataBlockEncoding.NONE; + + public HFileContextBuilder withHBaseCheckSum(boolean useHBaseCheckSum) { + this.usesHBaseChecksum = useHBaseCheckSum; + return this; + } + + public HFileContextBuilder withIncludesMvcc(boolean includesMvcc) { + this.includesMvcc = includesMvcc; + return this; + } + + public HFileContextBuilder withIncludesTags(boolean includesTags) { + this.includesTags = includesTags; + return this; + } + + public HFileContextBuilder withCompressionAlgo(Algorithm compressionAlgo) { + this.compressAlgo = compressionAlgo; + return this; + } + + public HFileContextBuilder withCompressTags(boolean compressTags) { + this.compressTags = compressTags; + return this; + } + + public HFileContextBuilder withChecksumType(ChecksumType checkSumType) { + this.checksumType = checkSumType; + return this; + } + + public HFileContextBuilder withBytesPerCheckSum(int bytesPerChecksum) { + this.bytesPerChecksum = bytesPerChecksum; + return this; + } + + public HFileContextBuilder withBlockSize(int blockSize) { + this.blocksize = blockSize; + return this; + } + + public HFileContextBuilder withDataBlockEncodingOnDisk(DataBlockEncoding encodingOnDisk) { + this.encodingOnDisk = encodingOnDisk; + return this; + } + + public HFileContextBuilder withDataBlockEncodingInCache(DataBlockEncoding encodingInCache) { + this.encodingInCache = encodingInCache; + return this; + } + + public HFileContext build() { + return new HFileContext(usesHBaseChecksum, includesMvcc, includesTags, compressAlgo, + compressTags, checksumType, bytesPerChecksum, blocksize, encodingOnDisk, encodingInCache); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java index b05666d64e9..2169bf7abdf 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java @@ -239,18 +239,19 @@ public class HFileBlock implements Cacheable { onDiskSizeWithoutHeader = 
b.getInt(); uncompressedSizeWithoutHeader = b.getInt(); prevBlockOffset = b.getLong(); - this.fileContext = new HFileContext(); - this.fileContext.setUsesHBaseChecksum(usesHBaseChecksum); + HFileContextBuilder contextBuilder = new HFileContextBuilder(); + contextBuilder.withHBaseCheckSum(usesHBaseChecksum); if (usesHBaseChecksum) { - this.fileContext.setChecksumType(ChecksumType.codeToType(b.get())); - this.fileContext.setBytesPerChecksum(b.getInt()); + contextBuilder.withChecksumType(ChecksumType.codeToType(b.get())); + contextBuilder.withBytesPerCheckSum(b.getInt()); this.onDiskDataSizeWithHeader = b.getInt(); } else { - this.fileContext.setChecksumType(ChecksumType.NULL); - this.fileContext.setBytesPerChecksum(0); + contextBuilder.withChecksumType(ChecksumType.NULL); + contextBuilder.withBytesPerCheckSum(0); this.onDiskDataSizeWithHeader = onDiskSizeWithoutHeader + HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM; } + this.fileContext = contextBuilder.build(); buf = b; buf.rewind(); } @@ -1019,9 +1020,18 @@ public class HFileBlock implements Cacheable { * 0 value in bytesPerChecksum. */ public HFileBlock getBlockForCaching() { - HFileContext newContext = fileContext.clone(); - newContext.setBytesPerChecksum(0); - newContext.setChecksumType(ChecksumType.NULL); // no checksums in cached data + HFileContext newContext = new HFileContextBuilder() + .withBlockSize(fileContext.getBlocksize()) + .withBytesPerCheckSum(0) + .withChecksumType(ChecksumType.NULL) // no checksums in cached data + .withCompressionAlgo(fileContext.getCompression()) + .withDataBlockEncodingInCache(fileContext.getEncodingInCache()) + .withDataBlockEncodingOnDisk(fileContext.getEncodingOnDisk()) + .withHBaseCheckSum(fileContext.shouldUseHBaseChecksum()) + .withCompressTags(fileContext.shouldCompressTags()) + .withIncludesMvcc(fileContext.shouldIncludeMvcc()) + .withIncludesTags(fileContext.shouldIncludeTags()) + .build(); return new HFileBlock(blockType, getOnDiskSizeWithoutHeader(), getUncompressedSizeWithoutHeader(), prevOffset, getUncompressedBufferWithHeader(), DONT_FILL_HEADER, startOffset, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoderImpl.java index 51f4b58d364..3e219008975 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoderImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoderImpl.java @@ -245,14 +245,14 @@ public class HFileDataBlockEncoderImpl implements HFileDataBlockEncoder { * See HBASE-8732 * @return a new in cache encoding context */ - private HFileBlockEncodingContext createInCacheEncodingContext(HFileContext meta) { - HFileContext newMeta = meta.clone(); + private HFileBlockEncodingContext createInCacheEncodingContext(HFileContext fileContext) { + HFileContext newContext = new HFileContext(fileContext); return (inCache != DataBlockEncoding.NONE) ? 
this.inCache.getEncoder().newDataBlockEncodingContext( - this.inCache, dummyHeader, newMeta) + this.inCache, dummyHeader, newContext) : // create a default encoding context - new HFileBlockDefaultEncodingContext(this.inCache, dummyHeader, newMeta); + new HFileBlockDefaultEncodingContext(this.inCache, dummyHeader, newContext); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java index 3e478be721c..f767ed1cd31 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java @@ -179,12 +179,12 @@ public class HFileReaderV2 extends AbstractHFileReader { } protected HFileContext createHFileContext(FixedFileTrailer trailer) { - HFileContext meta = new HFileContext(); - meta.setIncludesMvcc(this.includesMemstoreTS); - meta.setUsesHBaseChecksum( - trailer.getMinorVersion() >= MINOR_VERSION_WITH_CHECKSUM); - meta.setCompressAlgo(this.compressAlgo); - return meta; + HFileContext hFileContext = new HFileContextBuilder() + .withIncludesMvcc(this.includesMemstoreTS) + .withCompressionAlgo(this.compressAlgo) + .withHBaseCheckSum(trailer.getMinorVersion() >= MINOR_VERSION_WITH_CHECKSUM) + .build(); + return hFileContext; } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV3.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV3.java index cec92958516..6f9501647c1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV3.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV3.java @@ -64,12 +64,13 @@ public class HFileReaderV3 extends HFileReaderV2 { @Override protected HFileContext createHFileContext(FixedFileTrailer trailer) { - HFileContext meta = new HFileContext(); - meta.setIncludesMvcc(this.includesMemstoreTS); - meta.setUsesHBaseChecksum(true); - meta.setCompressAlgo(this.compressAlgo); - meta.setIncludesTags(true); - return meta; + HFileContext hfileContext = new HFileContextBuilder() + .withIncludesMvcc(this.includesMemstoreTS) + .withHBaseCheckSum(true) + .withCompressionAlgo(this.compressAlgo) + .withIncludesTags(true) + .build(); + return hfileContext; } /** @@ -273,4 +274,4 @@ public class HFileReaderV3 extends HFileReaderV2 { protected HFileBlock diskToCacheFormat(HFileBlock hfileBlock, final boolean isCompaction) { return dataBlockEncoder.diskToCacheFormat(hfileBlock, isCompaction); } -} \ No newline at end of file +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java index 5c37ca5ee1b..aa336143b93 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java @@ -34,11 +34,8 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValue.KVComparator; -import org.apache.hadoop.hbase.io.compress.Compression; -import org.apache.hadoop.hbase.io.hfile.HFileContext; import org.apache.hadoop.hbase.io.hfile.HFile.Writer; import org.apache.hadoop.hbase.io.hfile.HFileBlock.BlockWritable; -import org.apache.hadoop.hbase.util.ChecksumType; import org.apache.hadoop.hbase.util.BloomFilterWriter; import 
org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.io.Writable; @@ -135,9 +132,6 @@ public class HFileWriterV2 extends AbstractHFileWriter { protected HFileBlock.Writer createBlockWriter() { // HFile filesystem-level (non-caching) block writer hFileContext.setIncludesTags(false); - // This can be set while the write is created itself because - // in both cases useHBaseChecksum is going to be true - hFileContext.setUsesHBaseChecksum(true); return new HFileBlock.Writer(blockEncoder, hFileContext); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV3.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV3.java index 101abeb2084..3cfd6e0ea92 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV3.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV3.java @@ -186,7 +186,6 @@ public class HFileWriterV3 extends HFileWriterV2 { protected HFileBlock.Writer createBlockWriter() { // HFile filesystem-level (non-caching) block writer hFileContext.setIncludesTags(true); - hFileContext.setUsesHBaseChecksum(true); return new HFileBlock.Writer(blockEncoder, hFileContext); } @@ -199,4 +198,4 @@ public class HFileWriterV3 extends HFileWriterV2 { protected int getMinorVersion() { return HFileReaderV3.MAX_MINOR_VERSION; } -} \ No newline at end of file +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java index 9cb9d03e818..f52ef341a49 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java @@ -49,9 +49,7 @@ import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; import org.apache.hadoop.hbase.io.hfile.AbstractHFileWriter; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.HFileContext; -import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder; -import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl; -import org.apache.hadoop.hbase.io.hfile.NoOpDataBlockEncoder; +import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder; import org.apache.hadoop.hbase.regionserver.BloomType; import org.apache.hadoop.hbase.regionserver.HStore; import org.apache.hadoop.hbase.regionserver.StoreFile; @@ -194,18 +192,20 @@ public class HFileOutputFormat extends FileOutputFormat prevOffsetByType = new HashMap(); long totalSize = 0; @@ -839,14 +846,13 @@ public class TestHFileBlock { for (int size : new int[] { 100, 256, 12345 }) { byte[] byteArr = new byte[HConstants.HFILEBLOCK_HEADER_SIZE + size]; ByteBuffer buf = ByteBuffer.wrap(byteArr, 0, size); - HFileContext meta = new HFileContext(); - meta.setIncludesMvcc(includesMemstoreTS); - meta.setIncludesTags(includesTag); - meta.setUsesHBaseChecksum(false); - meta.setCompressAlgo(Algorithm.NONE); - meta.setBytesPerChecksum(HFile.DEFAULT_BYTES_PER_CHECKSUM); - meta.setChecksumType(ChecksumType.NULL); - meta.setBytesPerChecksum(0); + HFileContext meta = new HFileContextBuilder() + .withIncludesMvcc(includesMemstoreTS) + .withIncludesTags(includesTag) + .withHBaseCheckSum(false) + .withCompressionAlgo(Algorithm.NONE) + .withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM) + .withChecksumType(ChecksumType.NULL).build(); HFileBlock block = new HFileBlock(BlockType.DATA, size, size, -1, buf, HFileBlock.FILL_HEADER, -1, 0, meta); 
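The conversion pattern above repeats through the rest of the patch: every mutable "new HFileContext()" followed by setter calls becomes a single HFileContextBuilder chain ending in build(). A minimal before/after sketch for reference; the builder and its method names are taken from the diff above, while the wrapper class, method name, and concrete values are illustrative only:

import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
import org.apache.hadoop.hbase.io.hfile.HFileContext;
import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
import org.apache.hadoop.hbase.util.ChecksumType;

public class BuilderMigrationSketch {
  public static HFileContext exampleContext() {
    // Before this patch (setters now removed):
    //   HFileContext meta = new HFileContext();
    //   meta.setUsesHBaseChecksum(false);
    //   meta.setCompressAlgo(Algorithm.NONE);
    //   meta.setBlocksize(4096);
    // After: one expression; anything left unset keeps the builder defaults
    // (HBase checksums on, mvcc included, CRC32, 16K bytes per checksum,
    // HConstants.DEFAULT_BLOCKSIZE, DataBlockEncoding.NONE).
    return new HFileContextBuilder()
        .withHBaseCheckSum(false)
        .withCompressionAlgo(Algorithm.NONE)
        .withChecksumType(ChecksumType.NULL)
        .withBytesPerCheckSum(0)
        .withBlockSize(4096)
        .build();
  }
}

Note that HFileContext deliberately keeps setIncludesMvcc() and setIncludesTags() (flagged with TODOs above) because HFileWriterV2/V3.createBlockWriter() still flip the tags flag after construction.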
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockCompatibility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockCompatibility.java index 7b2cac5ab35..ac33a5d73ca 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockCompatibility.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockCompatibility.java @@ -42,13 +42,12 @@ import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.SmallTests; import org.apache.hadoop.hbase.fs.HFileSystem; +import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; import org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext; import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext; -import org.apache.hadoop.hbase.io.hfile.HFileContext; import org.apache.hadoop.hbase.io.hfile.HFileBlock.BlockWritable; -import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ChecksumType; import org.apache.hadoop.io.compress.Compressor; @@ -198,11 +197,12 @@ public class TestHFileBlockCompatibility { os.close(); FSDataInputStream is = fs.open(path); - HFileContext meta = new HFileContext(); - meta.setUsesHBaseChecksum(false); - meta.setIncludesMvcc(includesMemstoreTS); - meta.setIncludesTags(includesTag); - meta.setCompressAlgo(algo); + HFileContext meta = new HFileContextBuilder() + .withHBaseCheckSum(false) + .withIncludesMvcc(includesMemstoreTS) + .withIncludesTags(includesTag) + .withCompressionAlgo(algo) + .build(); HFileBlock.FSReader hbr = new HFileBlock.FSReaderV2(new FSDataInputStreamWrapper(is), totalSize, fs, path, meta); HFileBlock b = hbr.readBlockData(0, -1, -1, pread); @@ -280,11 +280,12 @@ public class TestHFileBlockCompatibility { os.close(); FSDataInputStream is = fs.open(path); - HFileContext meta = new HFileContext(); - meta.setUsesHBaseChecksum(false); - meta.setIncludesMvcc(includesMemstoreTS); - meta.setIncludesTags(includesTag); - meta.setCompressAlgo(algo); + HFileContext meta = new HFileContextBuilder() + .withHBaseCheckSum(false) + .withIncludesMvcc(includesMemstoreTS) + .withIncludesTags(includesTag) + .withCompressionAlgo(algo) + .build(); HFileBlock.FSReaderV2 hbr = new HFileBlock.FSReaderV2(new FSDataInputStreamWrapper(is), totalSize, fs, path, meta); hbr.setDataBlockEncoder(dataBlockEncoder); @@ -420,12 +421,12 @@ public class TestHFileBlockCompatibility { this.dataBlockEncoder = dataBlockEncoder != null ? dataBlockEncoder : NoOpDataBlockEncoder.INSTANCE; - meta = new HFileContext(); - meta.setUsesHBaseChecksum(false); - meta.setIncludesMvcc(includesMemstoreTS); - meta.setIncludesTags(includesTag); - meta.setCompressAlgo(compressionAlgorithm); - + meta = new HFileContextBuilder() + .withHBaseCheckSum(false) + .withIncludesMvcc(includesMemstoreTS) + .withIncludesTags(includesTag) + .withCompressionAlgo(compressionAlgorithm) + .build(); defaultBlockEncodingCtx = new HFileBlockDefaultEncodingContext(null, DUMMY_HEADER, meta); dataBlockEncodingCtx = this.dataBlockEncoder.newOnDiskDataBlockEncodingContext( @@ -730,9 +731,11 @@ public class TestHFileBlockCompatibility { * Creates a new HFileBlock. 
*/ public HFileBlock getBlockForCaching() { - meta.setUsesHBaseChecksum(false); - meta.setChecksumType(ChecksumType.NULL); - meta.setBytesPerChecksum(0); + HFileContext meta = new HFileContextBuilder() + .withHBaseCheckSum(false) + .withChecksumType(ChecksumType.NULL) + .withBytesPerCheckSum(0) + .build(); return new HFileBlock(blockType, getOnDiskSizeWithoutHeader(), getUncompressedSizeWithoutHeader(), prevOffset, getUncompressedBufferWithHeader(), DONT_FILL_HEADER, startOffset, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java index 7101291bba7..a7187f6e756 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java @@ -188,11 +188,12 @@ public class TestHFileBlockIndex { LOG.info("Size of " + path + ": " + fileSize); FSDataInputStream istream = fs.open(path); - HFileContext meta = new HFileContext(); - meta.setUsesHBaseChecksum(true); - meta.setIncludesMvcc(includesMemstoreTS); - meta.setIncludesTags(useTags); - meta.setCompressAlgo(compr); + HFileContext meta = new HFileContextBuilder() + .withHBaseCheckSum(true) + .withIncludesMvcc(includesMemstoreTS) + .withIncludesTags(useTags) + .withCompressionAlgo(compr) + .build(); HFileBlock.FSReader blockReader = new HFileBlock.FSReaderV2(istream, fs.getFileStatus(path) .getLen(), meta); @@ -241,13 +242,14 @@ public class TestHFileBlockIndex { private void writeWholeIndex(boolean useTags) throws IOException { assertEquals(0, keys.size()); - HFileContext meta = new HFileContext(); - meta.setUsesHBaseChecksum(true); - meta.setIncludesMvcc(includesMemstoreTS); - meta.setIncludesTags(useTags); - meta.setCompressAlgo(compr); - meta.setChecksumType(HFile.DEFAULT_CHECKSUM_TYPE); - meta.setBytesPerChecksum(HFile.DEFAULT_BYTES_PER_CHECKSUM); + HFileContext meta = new HFileContextBuilder() + .withHBaseCheckSum(true) + .withIncludesMvcc(includesMemstoreTS) + .withIncludesTags(useTags) + .withCompressionAlgo(compr) + .withChecksumType(HFile.DEFAULT_CHECKSUM_TYPE) + .withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM) + .build(); HFileBlock.Writer hbw = new HFileBlock.Writer(null, meta); FSDataOutputStream outputStream = fs.create(path); @@ -516,9 +518,10 @@ public class TestHFileBlockIndex { // Write the HFile { - HFileContext meta = new HFileContext(); - meta.setBlocksize(SMALL_BLOCK_SIZE); - meta.setCompressAlgo(compr); + HFileContext meta = new HFileContextBuilder() + .withBlockSize(SMALL_BLOCK_SIZE) + .withCompressionAlgo(compr) + .build(); HFile.Writer writer = HFile.getWriterFactory(conf, cacheConf) .withPath(fs, hfilePath) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileDataBlockEncoder.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileDataBlockEncoder.java index 4e48ed9df89..e706f2c2224 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileDataBlockEncoder.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileDataBlockEncoder.java @@ -30,7 +30,6 @@ import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.SmallTests; import org.apache.hadoop.hbase.io.HeapSize; -import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.compress.Compression.Algorithm; import 
org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; import org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext; @@ -127,13 +126,12 @@ public class TestHFileDataBlockEncoder { buf.position(headerSize); keyValues.rewind(); buf.put(keyValues); - HFileContext meta = new HFileContext(); - meta.setUsesHBaseChecksum(false); - meta.setIncludesMvcc(includesMemstoreTS); - meta.setIncludesTags(useTags); - meta.setCompressAlgo(Compression.Algorithm.NONE); - meta.setBlocksize(0); - meta.setChecksumType(ChecksumType.NULL); + HFileContext meta = new HFileContextBuilder().withHBaseCheckSum(false) + .withIncludesMvcc(includesMemstoreTS) + .withIncludesTags(useTags) + .withBlockSize(0) + .withChecksumType(ChecksumType.NULL) + .build(); HFileBlock block = new HFileBlock(BlockType.DATA, size, size, -1, buf, HFileBlock.FILL_HEADER, 0, 0, meta); @@ -203,13 +201,14 @@ public class TestHFileDataBlockEncoder { buf.position(HConstants.HFILEBLOCK_HEADER_SIZE); keyValues.rewind(); buf.put(keyValues); - HFileContext meta = new HFileContext(); - meta.setIncludesMvcc(includesMemstoreTS); - meta.setIncludesTags(useTag); - meta.setUsesHBaseChecksum(true); - meta.setCompressAlgo(Algorithm.NONE); - meta.setBlocksize(0); - meta.setChecksumType(ChecksumType.NULL); + HFileContext meta = new HFileContextBuilder() + .withIncludesMvcc(includesMemstoreTS) + .withIncludesTags(useTag) + .withHBaseCheckSum(true) + .withCompressionAlgo(Algorithm.NONE) + .withBlockSize(0) + .withChecksumType(ChecksumType.NULL) + .build(); HFileBlock b = new HFileBlock(BlockType.DATA, size, size, -1, buf, HFileBlock.FILL_HEADER, 0, 0, meta); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileInlineToRootChunkConversion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileInlineToRootChunkConversion.java index c850d3a1a43..b4aef0ae904 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileInlineToRootChunkConversion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileInlineToRootChunkConversion.java @@ -23,10 +23,10 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.util.Bytes; -import org.junit.experimental.categories.Category; import org.apache.hadoop.hbase.SmallTests; +import org.apache.hadoop.hbase.util.Bytes; import org.junit.Test; +import org.junit.experimental.categories.Category; /** * Test a case when an inline index chunk is converted to a root one. 
This reproduces the bug in @@ -52,8 +52,7 @@ public class TestHFileInlineToRootChunkConversion { FileSystem fs = FileSystem.get(conf); CacheConfig cacheConf = new CacheConfig(conf); conf.setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY, maxChunkSize); - HFileContext context = new HFileContext(); - context.setBlocksize(16); + HFileContext context = new HFileContextBuilder().withBlockSize(16).build(); HFileWriterV2 hfw = (HFileWriterV2) new HFileWriterV2.WriterFactoryV2(conf, cacheConf) .withFileContext(context) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFilePerformance.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFilePerformance.java index 9e1dd19b3ef..3ccfec2fc08 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFilePerformance.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFilePerformance.java @@ -161,9 +161,9 @@ public class TestHFilePerformance extends TestCase { FSDataOutputStream fout = createFSOutput(path); if ("HFile".equals(fileType)){ - HFileContext meta = new HFileContext(); - meta.setCompressAlgo(AbstractHFileWriter.compressionByName(codecName)); - meta.setBlocksize(minBlockSize); + HFileContext meta = new HFileContextBuilder() + .withCompressionAlgo(AbstractHFileWriter.compressionByName(codecName)) + .withBlockSize(minBlockSize).build(); System.out.println("HFile write method: "); HFile.Writer writer = HFile.getWriterFactoryNoCache(conf) .withOutputStream(fout) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java index 1a26bb55844..305d44793e0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java @@ -127,9 +127,10 @@ public class TestHFileSeek extends TestCase { long totalBytes = 0; FSDataOutputStream fout = createFSOutput(path, fs); try { - HFileContext context = new HFileContext(); - context.setBlocksize(options.minBlockSize); - context.setCompressAlgo(AbstractHFileWriter.compressionByName(options.compress)); + HFileContext context = new HFileContextBuilder() + .withBlockSize(options.minBlockSize) + .withCompressionAlgo(AbstractHFileWriter.compressionByName(options.compress)) + .build(); Writer writer = HFile.getWriterFactoryNoCache(conf) .withOutputStream(fout) .withFileContext(context) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV2.java index f01b5c9dede..27850025916 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV2.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV2.java @@ -43,7 +43,6 @@ import org.apache.hadoop.hbase.KeyValue.KVComparator; import org.apache.hadoop.hbase.SmallTests; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.compress.Compression.Algorithm; -import org.apache.hadoop.hbase.io.hfile.HFileContext; import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Writables; @@ -94,9 +93,10 @@ public class TestHFileWriterV2 { private void writeDataAndReadFromHFile(Path hfilePath, Algorithm compressAlgo, int entryCount, boolean findMidKey) throws IOException { - HFileContext context = new 
HFileContext(); - context.setBlocksize(4096); - context.setCompressAlgo(compressAlgo); + HFileContext context = new HFileContextBuilder() + .withBlockSize(4096) + .withCompressionAlgo(compressAlgo) + .build(); HFileWriterV2 writer = (HFileWriterV2) new HFileWriterV2.WriterFactoryV2(conf, new CacheConfig(conf)) .withPath(fs, hfilePath) @@ -137,11 +137,12 @@ public class TestHFileWriterV2 { assertEquals(2, trailer.getMajorVersion()); assertEquals(entryCount, trailer.getEntryCount()); - HFileContext meta = new HFileContext(); - meta.setUsesHBaseChecksum(true); - meta.setIncludesMvcc(false); - meta.setIncludesTags(false); - meta.setCompressAlgo(compressAlgo); + HFileContext meta = new HFileContextBuilder() + .withHBaseCheckSum(true) + .withIncludesMvcc(false) + .withIncludesTags(false) + .withCompressionAlgo(compressAlgo) + .build(); HFileBlock.FSReader blockReader = new HFileBlock.FSReaderV2(fsdis, fileSize, meta); // Comparator class name is stored in the trailer in version 2. diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3.java index 183ff443324..a5b375066a2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3.java @@ -115,9 +115,9 @@ public class TestHFileWriterV3 { private void writeDataAndReadFromHFile(Path hfilePath, Algorithm compressAlgo, int entryCount, boolean findMidKey, boolean useTags) throws IOException { - HFileContext context = new HFileContext(); - context.setBlocksize(4096); - context.setCompressAlgo(compressAlgo); + HFileContext context = new HFileContextBuilder() + .withBlockSize(4096) + .withCompressionAlgo(compressAlgo).build(); HFileWriterV3 writer = (HFileWriterV3) new HFileWriterV3.WriterFactoryV3(conf, new CacheConfig(conf)) .withPath(fs, hfilePath) @@ -168,11 +168,11 @@ public class TestHFileWriterV3 { assertEquals(3, trailer.getMajorVersion()); assertEquals(entryCount, trailer.getEntryCount()); - HFileContext meta = new HFileContext(); - meta.setCompressAlgo(compressAlgo); - meta.setIncludesMvcc(false); - meta.setIncludesTags(useTags); - meta.setUsesHBaseChecksum(true); + HFileContext meta = new HFileContextBuilder() + .withCompressionAlgo(compressAlgo) + .withIncludesMvcc(false) + .withIncludesTags(useTags) + .withHBaseCheckSum(true).build(); HFileBlock.FSReader blockReader = new HFileBlock.FSReaderV2(fsdis, fileSize, meta); // Comparator class name is stored in the trailer in version 2. 
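The TestHFileWriterV2/V3 hunks just above also show the read-back half of the pattern: the test describes what is already on disk with a builder-made context and hands it to HFileBlock.FSReaderV2, exactly as HFileReaderV2/V3.createHFileContext() do earlier in the patch. A condensed sketch; class and method names come from the diff, while the helper, its parameters, and the hardcoded flags are assumptions for illustration:

import java.io.IOException;

import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
import org.apache.hadoop.hbase.io.hfile.HFileBlock;
import org.apache.hadoop.hbase.io.hfile.HFileContext;
import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;

public class ReaderContextSketch {
  // The context now records facts about the file up front instead of being
  // patched by setters later: v2 files carry no tags, v3 files may.
  static HFileBlock.FSReader openBlockReader(FSDataInputStream fsdis,
      long fileSize, Algorithm compressAlgo, boolean isV3File) throws IOException {
    HFileContext meta = new HFileContextBuilder()
        .withHBaseCheckSum(true)   // trailer minor version supports checksums
        .withIncludesMvcc(false)
        .withIncludesTags(isV3File)
        .withCompressionAlgo(compressAlgo)
        .build();
    return new HFileBlock.FSReaderV2(fsdis, fileSize, meta);
  }
}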
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestReseekTo.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestReseekTo.java index d905f8ba2d3..cf483c3a54c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestReseekTo.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestReseekTo.java @@ -57,8 +57,7 @@ public class TestReseekTo { TEST_UTIL.getConfiguration().setInt("hfile.format.version", 3); } CacheConfig cacheConf = new CacheConfig(TEST_UTIL.getConfiguration()); - HFileContext context = new HFileContext(); - context.setBlocksize(4000); + HFileContext context = new HFileContextBuilder().withBlockSize(4000).build(); HFile.Writer writer = HFile.getWriterFactory( TEST_UTIL.getConfiguration(), cacheConf) .withOutputStream(fout) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java index 973124f78f6..3c7af4171d7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java @@ -22,9 +22,12 @@ import java.io.IOException; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.*; +import org.apache.hadoop.hbase.HBaseTestCase; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.SmallTests; +import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.io.RawComparator; import org.junit.experimental.categories.Category; /** @@ -73,8 +76,7 @@ public class TestSeekTo extends HBaseTestCase { } FSDataOutputStream fout = this.fs.create(ncTFile); int blocksize = toKV("a", tagUsage).getLength() * 3; - HFileContext context = new HFileContext(); - context.setBlocksize(blocksize); + HFileContext context = new HFileContextBuilder().withBlockSize(blocksize).build(); HFile.Writer writer = HFile.getWriterFactoryNoCache(conf).withOutputStream(fout) .withFileContext(context) // NOTE: This test is dependent on this deprecated nonstandard diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java index f3aa15625f0..e4d0134d326 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java @@ -18,10 +18,22 @@ */ package org.apache.hadoop.hbase.mapreduce; +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import java.util.TreeMap; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.*; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.LargeTests; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.io.compress.Compression; @@ -29,6 +41,7 @@ import 
org.apache.hadoop.hbase.io.compress.Compression.Algorithm; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.io.hfile.HFileContext; +import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder; import org.apache.hadoop.hbase.io.hfile.HFileScanner; import org.apache.hadoop.hbase.regionserver.BloomType; import org.apache.hadoop.hbase.regionserver.StoreFile; @@ -38,11 +51,6 @@ import org.junit.BeforeClass; import org.junit.Test; import org.junit.experimental.categories.Category; -import java.io.IOException; -import java.util.TreeMap; - -import static org.junit.Assert.*; - /** * Test cases for the "load" half of the HFileOutputFormat bulk load * functionality. These tests run faster than the full MR cluster @@ -262,9 +270,10 @@ public class TestLoadIncrementalHFiles { byte[] family, byte[] qualifier, byte[] startKey, byte[] endKey, int numRows) throws IOException { - HFileContext meta = new HFileContext(); - meta.setBlocksize(BLOCKSIZE); - meta.setCompressAlgo(COMPRESSION); + HFileContext meta = new HFileContextBuilder() + .withBlockSize(BLOCKSIZE) + .withCompressionAlgo(COMPRESSION) + .build(); HFile.Writer writer = HFile.getWriterFactory(configuration, new CacheConfig(configuration)) .withPath(fs, path) .withFileContext(meta) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/CreateRandomStoreFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/CreateRandomStoreFile.java index 501bcf4ce43..395b78513e2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/CreateRandomStoreFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/CreateRandomStoreFile.java @@ -38,9 +38,9 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.hfile.CacheConfig; -import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.io.hfile.HFileBlockIndex; import org.apache.hadoop.hbase.io.hfile.HFileContext; +import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder; import org.apache.hadoop.hbase.util.BloomFilterFactory; import org.apache.hadoop.io.BytesWritable; @@ -183,9 +183,8 @@ public class CreateRandomStoreFile { Integer.valueOf(cmdLine.getOptionValue(INDEX_BLOCK_SIZE_OPTION))); } - HFileContext meta = new HFileContext(); - meta.setCompressAlgo(compr); - meta.setBlocksize(blockSize); + HFileContext meta = new HFileContextBuilder().withCompressionAlgo(compr) + .withBlockSize(blockSize).build(); StoreFile.Writer sfw = new StoreFile.WriterBuilder(conf, new CacheConfig(conf), fs) .withOutputDir(outputDir) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java index 245fcca1fc3..4f7f5345e8b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java @@ -45,9 +45,10 @@ import org.apache.hadoop.hbase.io.compress.Compression.Algorithm; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoder; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; import org.apache.hadoop.hbase.io.encoding.EncodedDataBlock; -import org.apache.hadoop.hbase.io.hfile.HFileContext; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import 
org.apache.hadoop.hbase.io.hfile.HFileBlock; +import org.apache.hadoop.hbase.io.hfile.HFileContext; +import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder; import org.apache.hadoop.hbase.io.hfile.HFileReaderV2; import org.apache.hadoop.hbase.io.hfile.NoOpDataBlockEncoder; import org.apache.hadoop.hbase.util.Bytes; @@ -214,10 +215,10 @@ public class DataBlockEncodingTool { continue; } DataBlockEncoder d = encoding.getEncoder(); - HFileContext meta = new HFileContext(); - meta.setCompressAlgo(Compression.Algorithm.NONE); - meta.setIncludesMvcc(includesMemstoreTS); - meta.setIncludesTags(useTag); + HFileContext meta = new HFileContextBuilder() + .withCompressionAlgo(Compression.Algorithm.NONE) + .withIncludesMvcc(includesMemstoreTS) + .withIncludesTags(useTag).build(); codecs.add(new EncodedDataBlock(d, encoding, rawKVs, meta )); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java index 8e95e49ac8f..a39d23ec062 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java @@ -36,13 +36,16 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.*; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.MediumTests; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.hfile.BlockCache; import org.apache.hadoop.hbase.io.hfile.CacheConfig; -import org.apache.hadoop.hbase.io.hfile.HFileContext; -import org.apache.hadoop.hbase.io.hfile.NoOpDataBlockEncoder; import org.apache.hadoop.hbase.io.hfile.HFile; +import org.apache.hadoop.hbase.io.hfile.HFileContext; +import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder; +import org.apache.hadoop.hbase.io.hfile.NoOpDataBlockEncoder; import org.apache.hadoop.hbase.io.hfile.TestHFileWriterV2; import org.apache.hadoop.hbase.util.BloomFilterFactory; import org.apache.hadoop.hbase.util.ByteBloomFilter; @@ -292,8 +295,7 @@ public class TestCompoundBloomFilter { BLOOM_BLOCK_SIZES[t]); conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, true); cacheConf = new CacheConfig(conf); - HFileContext meta = new HFileContext(); - meta.setBlocksize(BLOCK_SIZES[t]); + HFileContext meta = new HFileContextBuilder().withBlockSize(BLOCK_SIZES[t]).build(); StoreFile.Writer w = new StoreFile.WriterBuilder(conf, cacheConf, fs) .withOutputDir(TEST_UTIL.getDataTestDir()) .withBloomType(bt) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java index 873476469e7..02a9c61bc63 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java @@ -37,12 +37,19 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FilterFileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PositionedReadable; -import org.apache.hadoop.hbase.*; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HColumnDescriptor; +import 
org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.MediumTests; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.fs.HFileSystem; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.HFileContext; +import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder; import org.apache.hadoop.hbase.io.hfile.HFileScanner; import org.apache.hadoop.hbase.io.hfile.NoOpDataBlockEncoder; import org.apache.hadoop.hbase.util.Bytes; @@ -73,8 +80,7 @@ public class TestFSErrorsExposed { FaultyFileSystem faultyfs = new FaultyFileSystem(hfs.getBackingFs()); FileSystem fs = new HFileSystem(faultyfs); CacheConfig cacheConf = new CacheConfig(util.getConfiguration()); - HFileContext meta = new HFileContext(); - meta.setBlocksize(2*1024); + HFileContext meta = new HFileContextBuilder().withBlockSize(2 * 1024).build(); StoreFile.Writer writer = new StoreFile.WriterBuilder( util.getConfiguration(), cacheConf, hfs) .withOutputDir(hfilePath) @@ -125,8 +131,7 @@ public class TestFSErrorsExposed { FaultyFileSystem faultyfs = new FaultyFileSystem(hfs.getBackingFs()); HFileSystem fs = new HFileSystem(faultyfs); CacheConfig cacheConf = new CacheConfig(util.getConfiguration()); - HFileContext meta = new HFileContext(); - meta.setBlocksize(2 * 1024); + HFileContext meta = new HFileContextBuilder().withBlockSize(2 * 1024).build(); StoreFile.Writer writer = new StoreFile.WriterBuilder( util.getConfiguration(), cacheConf, hfs) .withOutputDir(hfilePath) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java index adf06f4e485..e6d2a8318c0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java @@ -27,9 +27,16 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.*; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.LargeTests; import org.apache.hadoop.hbase.MultithreadedTestUtil.RepeatingTestThread; import org.apache.hadoop.hbase.MultithreadedTestUtil.TestContext; +import org.apache.hadoop.hbase.TableExistsException; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.RegionServerCallable; @@ -38,12 +45,12 @@ import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.RpcRetryingCaller; import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory; import org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hbase.TableExistsException; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.compress.Compression.Algorithm; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import 
org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.io.hfile.HFileContext; +import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder; import org.apache.hadoop.hbase.protobuf.RequestConverter; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest; @@ -51,9 +58,9 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; import org.junit.Test; +import org.junit.experimental.categories.Category; import com.google.common.collect.Lists; -import org.junit.experimental.categories.Category; /** * Tests bulk loading of HFiles and shows the atomicity or lack of atomicity of @@ -89,9 +96,9 @@ public class TestHRegionServerBulkLoad { */ public static void createHFile(FileSystem fs, Path path, byte[] family, byte[] qualifier, byte[] value, int numRows) throws IOException { - HFileContext context = new HFileContext(); - context.setBlocksize(BLOCKSIZE); - context.setCompressAlgo(COMPRESSION); + HFileContext context = new HFileContextBuilder().withBlockSize(BLOCKSIZE) + .withCompressionAlgo(COMPRESSION) + .build(); HFile.Writer writer = HFile .getWriterFactory(conf, new CacheConfig(conf)) .withPath(fs, path) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java index 85d087f90fb..2dd597fc846 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java @@ -60,6 +60,7 @@ import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.io.hfile.HFileContext; +import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder; import org.apache.hadoop.hbase.monitoring.MonitoredTask; import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext; import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest; @@ -321,8 +322,7 @@ public class TestStore extends TestCase { long seqid = f.getMaxSequenceId(); Configuration c = HBaseConfiguration.create(); FileSystem fs = FileSystem.get(c); - HFileContext meta = new HFileContext(); - meta.setBlocksize(StoreFile.DEFAULT_BLOCKSIZE_SMALL); + HFileContext meta = new HFileContextBuilder().withBlockSize(StoreFile.DEFAULT_BLOCKSIZE_SMALL).build(); StoreFile.Writer w = new StoreFile.WriterBuilder(c, new CacheConfig(c), fs) .withOutputDir(storedir) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java index cf9ede7bb3f..13587c5fbcc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java @@ -47,6 +47,7 @@ import org.apache.hadoop.hbase.io.hfile.BlockCache; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.CacheStats; import org.apache.hadoop.hbase.io.hfile.HFileContext; +import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder; import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder; import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl; import 
org.apache.hadoop.hbase.io.hfile.HFileScanner; @@ -96,8 +97,7 @@ public class TestStoreFile extends HBaseTestCase { HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem( conf, fs, new Path(this.testDir, hri.getTable().getNameAsString()), hri); - HFileContext meta = new HFileContext(); - meta.setBlocksize(2 * 1024); + HFileContext meta = new HFileContextBuilder().withBlockSize(2*1024).build(); StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs) .withFilePath(regionFs.createTempName()) .withFileContext(meta) @@ -148,8 +148,7 @@ public class TestStoreFile extends HBaseTestCase { HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem( conf, fs, new Path(this.testDir, hri.getTable().getNameAsString()), hri); - HFileContext meta = new HFileContext(); - meta.setBlocksize(8 * 1024); + HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build(); // Make a store file and write data to it. StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs) .withFilePath(regionFs.createTempName()) @@ -194,8 +193,7 @@ public class TestStoreFile extends HBaseTestCase { FSUtils.setRootDir(testConf, this.testDir); HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem( testConf, fs, FSUtils.getTableDir(this.testDir, hri.getTable()), hri); - HFileContext meta = new HFileContext(); - meta.setBlocksize(8 * 1024); + HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build(); // Make a store file and write data to it. StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs) @@ -240,8 +238,7 @@ public class TestStoreFile extends HBaseTestCase { HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem( testConf, fs, FSUtils.getTableDir(this.testDir, hri.getTable()), hri); - HFileContext meta = new HFileContext(); - meta.setBlocksize(8 * 1024); + HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build(); // Make a store file and write data to it. //// StoreFile.Writer writer = new StoreFile.WriterBuilder(testConf, cacheConf, this.fs) .withFilePath(regionFs.createTempName()) @@ -503,10 +500,9 @@ public class TestStoreFile extends HBaseTestCase { // write the file Path f = new Path(ROOT_DIR, getName()); - HFileContext meta = new HFileContext(); - meta.setBlocksize(StoreFile.DEFAULT_BLOCKSIZE_SMALL); - meta.setChecksumType(CKTYPE); - meta.setBytesPerChecksum(CKBYTES); + HFileContext meta = new HFileContextBuilder().withBlockSize(StoreFile.DEFAULT_BLOCKSIZE_SMALL) + .withChecksumType(CKTYPE) + .withBytesPerCheckSum(CKBYTES).build(); // Make a store file and write data to it. StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs) .withFilePath(f) @@ -526,10 +522,10 @@ public class TestStoreFile extends HBaseTestCase { // write the file Path f = new Path(ROOT_DIR, getName()); - HFileContext meta = new HFileContext(); - meta.setBlocksize(StoreFile.DEFAULT_BLOCKSIZE_SMALL); - meta.setChecksumType(CKTYPE); - meta.setBytesPerChecksum(CKBYTES); + HFileContext meta = new HFileContextBuilder() + .withBlockSize(StoreFile.DEFAULT_BLOCKSIZE_SMALL) + .withChecksumType(CKTYPE) + .withBytesPerCheckSum(CKBYTES).build(); // Make a store file and write data to it. 
StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs) .withFilePath(f) @@ -582,8 +578,7 @@ public class TestStoreFile extends HBaseTestCase { public void testReseek() throws Exception { // write the file Path f = new Path(ROOT_DIR, getName()); - HFileContext meta = new HFileContext(); - meta.setBlocksize(8 * 1024); + HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build(); // Make a store file and write data to it. StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs) .withFilePath(f) @@ -626,10 +621,9 @@ public class TestStoreFile extends HBaseTestCase { for (int x : new int[]{0,1}) { // write the file Path f = new Path(ROOT_DIR, getName() + x); - HFileContext meta = new HFileContext(); - meta.setBlocksize(StoreFile.DEFAULT_BLOCKSIZE_SMALL); - meta.setChecksumType(CKTYPE); - meta.setBytesPerChecksum(CKBYTES); + HFileContext meta = new HFileContextBuilder().withBlockSize(StoreFile.DEFAULT_BLOCKSIZE_SMALL) + .withChecksumType(CKTYPE) + .withBytesPerCheckSum(CKBYTES).build(); // Make a store file and write data to it. StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs) .withFilePath(f) @@ -782,8 +776,7 @@ public class TestStoreFile extends HBaseTestCase { // Make up a directory hierarchy that has a regiondir ("7e0102") and familyname. Path storedir = new Path(new Path(this.testDir, "7e0102"), "familyname"); Path dir = new Path(storedir, "1234567890"); - HFileContext meta = new HFileContext(); - meta.setBlocksize(8 * 1024); + HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build(); // Make a store file and write data to it. StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs) .withOutputDir(dir) @@ -969,10 +962,10 @@ public class TestStoreFile extends HBaseTestCase { totalSize += kv.getLength() + 1; } int blockSize = totalSize / numBlocks; - HFileContext meta = new HFileContext(); - meta.setBlocksize(blockSize); - meta.setChecksumType(CKTYPE); - meta.setBytesPerChecksum(CKBYTES); + HFileContext meta = new HFileContextBuilder().withBlockSize(blockSize) + .withChecksumType(CKTYPE) + .withBytesPerCheckSum(CKBYTES) + .build(); // Make a store file and write data to it. StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs) .withFilePath(path) @@ -1005,12 +998,12 @@ public class TestStoreFile extends HBaseTestCase { dataBlockEncoderAlgo, dataBlockEncoderAlgo); cacheConf = new CacheConfig(conf); - HFileContext meta = new HFileContext(); - meta.setBlocksize(HConstants.DEFAULT_BLOCKSIZE); - meta.setChecksumType(CKTYPE); - meta.setBytesPerChecksum(CKBYTES); - meta.setEncodingOnDisk(dataBlockEncoderAlgo); - meta.setEncodingInCache(dataBlockEncoderAlgo); + HFileContext meta = new HFileContextBuilder().withBlockSize(StoreFile.DEFAULT_BLOCKSIZE_SMALL) + .withChecksumType(CKTYPE) + .withBytesPerCheckSum(CKBYTES) + .withDataBlockEncodingInCache(dataBlockEncoderAlgo) + .withDataBlockEncodingOnDisk(dataBlockEncoderAlgo) + .build(); // Make a store file and write data to it. 
StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs) .withFilePath(path) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java index 8d03c932abb..b42b2f808c4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java @@ -60,6 +60,7 @@ import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.io.hfile.HFileContext; +import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.monitoring.MonitoredTask; import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine; @@ -327,7 +328,7 @@ public class TestWALReplay { HLog wal = createWAL(this.conf); HRegion region = HRegion.openHRegion(hri, htd, wal, this.conf); Path f = new Path(basedir, "hfile"); - HFileContext context = new HFileContext(); + HFileContext context = new HFileContextBuilder().build(); HFile.Writer writer = HFile.getWriterFactoryNoCache(conf).withPath(fs, f) .withFileContext(context).create(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java index 8f4aaaa5b8b..5ea1c8cdaa8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java @@ -68,6 +68,7 @@ import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessorEnvironment; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.io.hfile.HFileContext; +import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder; import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles; import org.apache.hadoop.hbase.master.MasterCoprocessorHost; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; @@ -843,7 +844,7 @@ public class TestAccessController extends SecureTestUtil { HFile.Writer writer = null; long now = System.currentTimeMillis(); try { - HFileContext context = new HFileContext(); + HFileContext context = new HFileContextBuilder().build(); writer = HFile.getWriterFactory(conf, new CacheConfig(conf)) .withPath(fs, path) .withFileContext(context)
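Finally, two derivation idioms replace the old clone-then-mutate style once the setters are gone. HFileDataBlockEncoderImpl uses the new copy constructor for a verbatim duplicate, while HFileBlock.getBlockForCaching() rebuilds the context field by field so the checksum settings can be zeroed for cached blocks. A sketch of both, with the same caveat as above (the wrapper class and method names are illustrative; the getters and builder methods are from the diff):

import org.apache.hadoop.hbase.io.hfile.HFileContext;
import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
import org.apache.hadoop.hbase.util.ChecksumType;

public class DerivedContextSketch {
  // Idiom 1: verbatim duplicate via the copy constructor added in this patch.
  static HFileContext verbatimCopy(HFileContext ctx) {
    return new HFileContext(ctx);
  }

  // Idiom 2: selective rebuild, carrying every field over except the two
  // checksum fields, which are nulled because cached data holds no checksums.
  static HFileContext cacheVariantOf(HFileContext fileContext) {
    return new HFileContextBuilder()
        .withBlockSize(fileContext.getBlocksize())
        .withBytesPerCheckSum(0)
        .withChecksumType(ChecksumType.NULL)
        .withCompressionAlgo(fileContext.getCompression())
        .withCompressTags(fileContext.shouldCompressTags())
        .withHBaseCheckSum(fileContext.shouldUseHBaseChecksum())
        .withIncludesMvcc(fileContext.shouldIncludeMvcc())
        .withIncludesTags(fileContext.shouldIncludeTags())
        .withDataBlockEncodingOnDisk(fileContext.getEncodingOnDisk())
        .withDataBlockEncodingInCache(fileContext.getEncodingInCache())
        .build();
  }
}

The net effect is that a context shared between a writer, its readers, and the block cache can no longer be mutated in place, which is what the builder adoption in HBASE-9546 buys.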