HBASE-4422 Move block cache parameters and references into single CacheConf class (jgray)

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1182194 13f79535-47bb-0310-9956-ffa450edef68

parent c52469aad9
commit 52c4ce5cdd

@@ -12,6 +12,8 @@ Release 0.93.0 - Unreleased
               transaction log (dhruba via jgray)
   HBASE-4145 Provide metrics for hbase client (Ming Ma)
   HBASE-4465 Lazy-seek optimization for StoreFile scanners (mikhail/liyin)
   HBASE-4422 Move block cache parameters and references into single
              CacheConf class (jgray)

BUG FIXES
   HBASE-4488 Store could miss rows during flush (Lars H via jgray)

@@ -27,7 +27,7 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFileScanner;
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.util.Bytes;

@@ -59,10 +59,10 @@ public class HalfStoreFileReader extends StoreFile.Reader {
   * @param r
   * @throws IOException
   */
  public HalfStoreFileReader(final FileSystem fs, final Path p, final BlockCache c,
      final Reference r)
  public HalfStoreFileReader(final FileSystem fs, final Path p,
      final CacheConfig cacheConf, final Reference r)
      throws IOException {
    super(fs, p, c, false, false);
    super(fs, p, cacheConf);
    // This is not actual midkey for this half-file; its just border
    // around which we split top and bottom. Have to look in files to find
    // actual last and first keys for bottom and top halves. Half-files don't

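For illustration only (not part of this commit): a minimal sketch of how a caller opens a half reader against the new signature, mirroring the LoadIncrementalHFiles change later in this diff. The helper name openHalf is made up here; everything else uses only signatures shown in the hunks.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.io.HalfStoreFileReader;
import org.apache.hadoop.hbase.io.Reference;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;

class HalfReaderSketch {
  // The raw BlockCache argument is gone; callers now hand over a CacheConfig
  // built from the active Configuration.
  static HalfStoreFileReader openHalf(Configuration conf, Path inFile, Reference ref)
      throws IOException {
    FileSystem fs = inFile.getFileSystem(conf);
    return new HalfStoreFileReader(fs, inFile, new CacheConfig(conf), ref);
  }
}
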
@@ -77,25 +77,13 @@ public abstract class AbstractHFileReader implements HFile.Reader {
  /** Size of this file. */
  protected final long fileSize;

  /** Block cache to use. */
  protected final BlockCache blockCache;
  /** Block cache configuration. */
  protected final CacheConfig cacheConf;

  protected AtomicLong cacheHits = new AtomicLong();
  protected AtomicLong blockLoads = new AtomicLong();
  protected AtomicLong metaLoads = new AtomicLong();

  /**
   * Whether file is from in-memory store (comes from column family
   * configuration).
   */
  protected boolean inMemory = false;

  /**
   * Whether blocks of file should be evicted from the block cache when the
   * file is being closed
   */
  protected final boolean evictOnClose;

  /** Path of file */
  protected final Path path;

@@ -110,16 +98,13 @@ public abstract class AbstractHFileReader implements HFile.Reader {
  protected AbstractHFileReader(Path path, FixedFileTrailer trailer,
      final FSDataInputStream fsdis, final long fileSize,
      final boolean closeIStream,
      final BlockCache blockCache, final boolean inMemory,
      final boolean evictOnClose) {
      final CacheConfig cacheConf) {
    this.trailer = trailer;
    this.compressAlgo = trailer.getCompressionCodec();
    this.blockCache = blockCache;
    this.cacheConf = cacheConf;
    this.fileSize = fileSize;
    this.istream = fsdis;
    this.closeIStream = closeIStream;
    this.inMemory = inMemory;
    this.evictOnClose = evictOnClose;
    this.path = path;
    this.name = path.getName();
    cfStatsPrefix = "cf." + parseCfNameFromPath(path.toString());

@@ -167,7 +152,7 @@ public abstract class AbstractHFileReader implements HFile.Reader {
    return "reader=" + path.toString() +
        (!isFileInfoLoaded()? "":
          ", compression=" + compressAlgo.getName() +
          ", inMemory=" + inMemory +
          ", cacheConf=" + cacheConf +
          ", firstKey=" + toStringFirstKey() +
          ", lastKey=" + toStringLastKey()) +
        ", avgKeyLen=" + avgKeyLen +

@@ -33,7 +33,6 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.KeyValue.KeyComparator;
import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo;
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.RawComparator;
import org.apache.hadoop.io.Writable;

@@ -91,17 +90,8 @@ public abstract class AbstractHFileWriter implements HFile.Writer {
  /** May be null if we were passed a stream. */
  protected final Path path;

  /** Whether to cache key/value data blocks on write */
  protected final boolean cacheDataBlocksOnWrite;

  /** Whether to cache non-root index blocks on write */
  protected final boolean cacheIndexBlocksOnWrite;

  /** Block cache to optionally fill on write. */
  protected BlockCache blockCache;

  /** Configuration used for block cache initialization */
  private Configuration conf;
  /** Cache configuration for caching data on write. */
  protected final CacheConfig cacheConf;

  /**
   * Name for this object used when logging or in toString. Is either

@@ -109,7 +99,7 @@ public abstract class AbstractHFileWriter implements HFile.Writer {
   */
  protected final String name;

  public AbstractHFileWriter(Configuration conf,
  public AbstractHFileWriter(CacheConfig cacheConf,
      FSDataOutputStream outputStream, Path path, int blockSize,
      Compression.Algorithm compressAlgo, KeyComparator comparator) {
    this.outputStream = outputStream;

@@ -122,15 +112,7 @@ public abstract class AbstractHFileWriter implements HFile.Writer {
        : Bytes.BYTES_RAWCOMPARATOR;

    closeOutputStream = path != null;

    cacheDataBlocksOnWrite = conf.getBoolean(HFile.CACHE_BLOCKS_ON_WRITE_KEY,
        false);
    cacheIndexBlocksOnWrite = HFileBlockIndex.shouldCacheOnWrite(conf);

    this.conf = conf;

    if (cacheDataBlocksOnWrite || cacheIndexBlocksOnWrite)
      initBlockCache();
    this.cacheConf = cacheConf;
  }

  /**

@@ -275,13 +257,4 @@ public abstract class AbstractHFileWriter implements HFile.Writer {
        fs.getDefaultReplication(), fs.getDefaultBlockSize(),
        null);
  }

  /** Initializes the block cache to use for cache-on-write */
  protected void initBlockCache() {
    if (blockCache == null) {
      blockCache = StoreFile.getBlockCache(conf);
      conf = null; // This is all we need configuration for.
    }
  }

}

@@ -83,14 +83,36 @@ public interface BlockCache {
   */
  public void shutdown();

  /**
   * Returns the total size of the block cache, in bytes.
   * @return size of cache, in bytes
   */
  public long size();

  /**
   * Returns the free size of the block cache, in bytes.
   * @return free space in cache, in bytes
   */
  public long getFreeSize();

  /**
   * Returns the occupied size of the block cache, in bytes.
   * @return occupied space in cache, in bytes
   */
  public long getCurrentSize();

  /**
   * Returns the number of evictions that have occurred.
   * @return number of evictions
   */
  public long getEvictedCount();

  /**
   * Returns the number of blocks currently cached in the block cache.
   * @return number of blocks in the cache
   */
  public long getBlockCount();

  /**
   * Performs a BlockCache summary and returns a List of BlockCacheColumnFamilySummary objects.
   * This method could be fairly heavyweight in that it evaluates the entire HBase file-system

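For illustration only (not part of this commit): the new getters let a caller report cache occupancy without knowing the concrete cache implementation. A minimal sketch, assuming nothing beyond the methods declared above; the BlockCacheReport class name is made up.

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.io.hfile.BlockCache;

final class BlockCacheReport {
  private static final Log LOG = LogFactory.getLog(BlockCacheReport.class);

  // Logs the sizing and counting getters added to the BlockCache interface above.
  static void log(BlockCache cache) {
    LOG.info("blockCache size=" + cache.size()
        + " free=" + cache.getFreeSize()
        + " current=" + cache.getCurrentSize()
        + " evicted=" + cache.getEvictedCount()
        + " blocks=" + cache.getBlockCount());
  }
}
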
@@ -0,0 +1,326 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.io.hfile;

import java.lang.management.ManagementFactory;
import java.lang.management.MemoryUsage;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.util.DirectMemoryUtils;
import org.apache.hadoop.util.StringUtils;

/**
 * Stores all of the cache objects and configuration for a single HFile.
 */
public class CacheConfig {
  private static final Log LOG = LogFactory.getLog(CacheConfig.class.getName());

  /**
   * Configuration key for the size of the block cache, in bytes.
   */
  public static final String HFILE_BLOCK_CACHE_SIZE_KEY =
      "hfile.block.cache.size";

  /**
   * Configuration key to cache data blocks on write. There are separate
   * switches for bloom blocks and non-root index blocks.
   */
  public static final String CACHE_BLOCKS_ON_WRITE_KEY =
      "hbase.rs.cacheblocksonwrite";

  /**
   * Configuration key to cache leaf and intermediate-level index blocks on
   * write.
   */
  public static final String CACHE_INDEX_BLOCKS_ON_WRITE_KEY =
      "hfile.block.index.cacheonwrite";

  /**
   * Configuration key to cache compound bloom filter blocks on write.
   */
  public static final String CACHE_BLOOM_BLOCKS_ON_WRITE_KEY =
      "hfile.block.bloom.cacheonwrite";

  /**
   * TODO: Implement this (jgray)
   * Configuration key to cache data blocks in compressed format.
   */
  public static final String CACHE_DATA_BLOCKS_COMPRESSED_KEY =
      "hbase.rs.blockcache.cachedatacompressed";

  /**
   * Configuration key to evict all blocks of a given file from the block cache
   * when the file is closed.
   */
  public static final String EVICT_BLOCKS_ON_CLOSE_KEY =
      "hbase.rs.evictblocksonclose";

  // Defaults

  public static final boolean DEFAULT_CACHE_DATA_ON_READ = true;
  public static final boolean DEFAULT_CACHE_DATA_ON_WRITE = false;
  public static final boolean DEFAULT_IN_MEMORY = false;
  public static final boolean DEFAULT_CACHE_INDEXES_ON_WRITE = false;
  public static final boolean DEFAULT_CACHE_BLOOMS_ON_WRITE = false;
  public static final boolean DEFAULT_EVICT_ON_CLOSE = false;
  public static final boolean DEFAULT_COMPRESSED_CACHE = false;

  /** Local reference to the block cache, null if completely disabled */
  private final BlockCache blockCache;

  /**
   * Whether blocks should be cached on read (default is on if there is a
   * cache but this can be turned off on a per-family or per-request basis)
   */
  private boolean cacheDataOnRead;

  /** Whether blocks should be flagged as in-memory when being cached */
  private final boolean inMemory;

  /** Whether data blocks should be cached when new files are written */
  private final boolean cacheDataOnWrite;

  /** Whether index blocks should be cached when new files are written */
  private final boolean cacheIndexesOnWrite;

  /** Whether compound bloom filter blocks should be cached on write */
  private final boolean cacheBloomsOnWrite;

  /** Whether blocks of a file should be evicted when the file is closed */
  private final boolean evictOnClose;

  /** Whether data blocks should be stored in compressed form in the cache */
  private final boolean cacheCompressed;

  /**
   * Create a cache configuration using the specified configuration object and
   * family descriptor.
   * @param conf hbase configuration
   * @param family column family configuration
   */
  public CacheConfig(Configuration conf, HColumnDescriptor family) {
    this(CacheConfig.instantiateBlockCache(conf),
        family.isBlockCacheEnabled(), family.isInMemory(),
        conf.getBoolean(CACHE_BLOCKS_ON_WRITE_KEY, DEFAULT_CACHE_DATA_ON_WRITE),
        conf.getBoolean(CACHE_INDEX_BLOCKS_ON_WRITE_KEY,
            DEFAULT_CACHE_INDEXES_ON_WRITE),
        conf.getBoolean(CACHE_BLOOM_BLOCKS_ON_WRITE_KEY,
            DEFAULT_CACHE_BLOOMS_ON_WRITE),
        conf.getBoolean(EVICT_BLOCKS_ON_CLOSE_KEY, DEFAULT_EVICT_ON_CLOSE),
        conf.getBoolean(CACHE_DATA_BLOCKS_COMPRESSED_KEY, DEFAULT_COMPRESSED_CACHE)
    );
  }

  /**
   * Create a cache configuration using the specified configuration object and
   * defaults for family level settings.
   * @param conf hbase configuration
   */
  public CacheConfig(Configuration conf) {
    this(CacheConfig.instantiateBlockCache(conf),
        DEFAULT_CACHE_DATA_ON_READ,
        DEFAULT_IN_MEMORY, // This is a family-level setting so can't be set
                           // strictly from conf
        conf.getBoolean(CACHE_BLOCKS_ON_WRITE_KEY, DEFAULT_CACHE_DATA_ON_WRITE),
        conf.getBoolean(CACHE_INDEX_BLOCKS_ON_WRITE_KEY,
            DEFAULT_CACHE_INDEXES_ON_WRITE),
        conf.getBoolean(CACHE_BLOOM_BLOCKS_ON_WRITE_KEY,
            DEFAULT_CACHE_BLOOMS_ON_WRITE),
        conf.getBoolean(EVICT_BLOCKS_ON_CLOSE_KEY, DEFAULT_EVICT_ON_CLOSE),
        conf.getBoolean(CACHE_DATA_BLOCKS_COMPRESSED_KEY,
            DEFAULT_COMPRESSED_CACHE)
    );
  }

  /**
   * Create a block cache configuration with the specified cache and
   * configuration parameters.
   * @param blockCache reference to block cache, null if completely disabled
   * @param cacheDataOnRead whether data blocks should be cached on read
   * @param inMemory whether blocks should be flagged as in-memory
   * @param cacheDataOnWrite whether data blocks should be cached on write
   * @param cacheIndexesOnWrite whether index blocks should be cached on write
   * @param cacheBloomsOnWrite whether blooms should be cached on write
   * @param evictOnClose whether blocks should be evicted when HFile is closed
   * @param cacheCompressed whether to store blocks as compressed in the cache
   */
  CacheConfig(final BlockCache blockCache,
      final boolean cacheDataOnRead, final boolean inMemory,
      final boolean cacheDataOnWrite, final boolean cacheIndexesOnWrite,
      final boolean cacheBloomsOnWrite, final boolean evictOnClose,
      final boolean cacheCompressed) {
    this.blockCache = blockCache;
    this.cacheDataOnRead = cacheDataOnRead;
    this.inMemory = inMemory;
    this.cacheDataOnWrite = cacheDataOnWrite;
    this.cacheIndexesOnWrite = cacheIndexesOnWrite;
    this.cacheBloomsOnWrite = cacheBloomsOnWrite;
    this.evictOnClose = evictOnClose;
    this.cacheCompressed = cacheCompressed;
  }

  /**
   * Constructs a cache configuration copied from the specified configuration.
   * @param cacheConf
   */
  public CacheConfig(CacheConfig cacheConf) {
    this(cacheConf.blockCache, cacheConf.cacheDataOnRead, cacheConf.inMemory,
        cacheConf.cacheDataOnWrite, cacheConf.cacheIndexesOnWrite,
        cacheConf.cacheBloomsOnWrite, cacheConf.evictOnClose,
        cacheConf.cacheCompressed);
  }

  /**
   * Checks whether the block cache is enabled.
   */
  public boolean isBlockCacheEnabled() {
    return this.blockCache != null;
  }

  /**
   * Returns the block cache.
   * @return the block cache, or null if caching is completely disabled
   */
  public BlockCache getBlockCache() {
    return this.blockCache;
  }

  /**
   * Returns whether the blocks of this HFile should be cached on read or not.
   * @return true if blocks should be cached on read, false if not
   */
  public boolean shouldCacheDataOnRead() {
    return isBlockCacheEnabled() && cacheDataOnRead;
  }

  /**
   * @return true if blocks in this file should be flagged as in-memory
   */
  public boolean isInMemory() {
    return isBlockCacheEnabled() && this.inMemory;
  }

  /**
   * @return true if data blocks should be written to the cache when an HFile is
   *         written, false if not
   */
  public boolean shouldCacheDataOnWrite() {
    return isBlockCacheEnabled() && this.cacheDataOnWrite;
  }

  /**
   * @return true if index blocks should be written to the cache when an HFile
   *         is written, false if not
   */
  public boolean shouldCacheIndexesOnWrite() {
    return isBlockCacheEnabled() && this.cacheIndexesOnWrite;
  }

  /**
   * @return true if bloom blocks should be written to the cache when an HFile
   *         is written, false if not
   */
  public boolean shouldCacheBloomsOnWrite() {
    return isBlockCacheEnabled() && this.cacheBloomsOnWrite;
  }

  /**
   * @return true if blocks should be evicted from the cache when an HFile
   *         reader is closed, false if not
   */
  public boolean shouldEvictOnClose() {
    return isBlockCacheEnabled() && this.evictOnClose;
  }

  /**
   * @return true if blocks should be compressed in the cache, false if not
   */
  public boolean shouldCacheCompressed() {
    return isBlockCacheEnabled() && this.cacheCompressed;
  }

  @Override
  public String toString() {
    if (!isBlockCacheEnabled()) {
      return "CacheConfig:disabled";
    }
    return "CacheConfig:enabled " +
        "[cacheDataOnRead=" + shouldCacheDataOnRead() + "] " +
        "[cacheDataOnWrite=" + shouldCacheDataOnWrite() + "] " +
        "[cacheIndexesOnWrite=" + shouldCacheIndexesOnWrite() + "] " +
        "[cacheBloomsOnWrite=" + shouldCacheBloomsOnWrite() + "] " +
        "[cacheEvictOnClose=" + shouldEvictOnClose() + "] " +
        "[cacheCompressed=" + shouldCacheCompressed() + "]";
  }

  // Static block cache reference and methods

  /**
   * Static reference to the block cache, or null if no caching should be used
   * at all.
   */
  private static BlockCache globalBlockCache;

  /** Boolean whether we have disabled the block cache entirely. */
  private static boolean blockCacheDisabled = false;

  /**
   * Returns the block cache or <code>null</code> in case none should be used.
   *
   * @param conf The current configuration.
   * @return The block cache or <code>null</code>.
   */
  private static synchronized BlockCache instantiateBlockCache(
      Configuration conf) {
    if (globalBlockCache != null) return globalBlockCache;
    if (blockCacheDisabled) return null;

    float cachePercentage = conf.getFloat(HFILE_BLOCK_CACHE_SIZE_KEY, 0.2f);
    if (cachePercentage == 0L) {
      blockCacheDisabled = true;
      return null;
    }
    if (cachePercentage > 1.0) {
      throw new IllegalArgumentException(HFILE_BLOCK_CACHE_SIZE_KEY +
          " must be between 0.0 and 1.0, not > 1.0");
    }

    // Calculate the amount of heap to give the heap.
    MemoryUsage mu = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
    long cacheSize = (long)(mu.getMax() * cachePercentage);
    int blockSize = conf.getInt("hbase.offheapcache.minblocksize",
        HFile.DEFAULT_BLOCKSIZE);
    long offHeapCacheSize =
        (long) (conf.getFloat("hbase.offheapcache.percentage", (float) 0.95) *
            DirectMemoryUtils.getDirectMemorySize());
    LOG.info("Allocating LruBlockCache with maximum size " +
        StringUtils.humanReadableInt(cacheSize));
    if (offHeapCacheSize <= 0) {
      globalBlockCache = new LruBlockCache(cacheSize,
          StoreFile.DEFAULT_BLOCKSIZE_SMALL);
    } else {
      globalBlockCache = new DoubleBlockCache(cacheSize, offHeapCacheSize,
          StoreFile.DEFAULT_BLOCKSIZE_SMALL, blockSize, conf);
    }
    return globalBlockCache;
  }
}

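For illustration only (not part of this commit): a minimal usage sketch of the new class, relying only on the constructor and accessors defined above. Whether a block cache is actually instantiated depends on the hfile.block.cache.size setting; the class name below is made up.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;

public class CacheConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // One object now carries the block cache reference plus all caching switches.
    CacheConfig cacheConf = new CacheConfig(conf);
    System.out.println(cacheConf); // "CacheConfig:enabled [...]" or "CacheConfig:disabled"
    if (cacheConf.isBlockCacheEnabled()) {
      BlockCache cache = cacheConf.getBlockCache();
      System.out.println("cache bytes in use: " + cache.getCurrentSize());
    }
  }
}
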
@@ -166,4 +166,9 @@ public class DoubleBlockCache implements BlockCache, HeapSize {
    return onHeapCache.getBlockCacheColumnFamilySummaries(conf);
  }

  @Override
  public long getBlockCount() {
    return onHeapCache.getBlockCount() + offHeapCache.getBlockCount();
  }

}

@@ -198,8 +198,12 @@ public class HFile {
   */
  public static abstract class WriterFactory {
    protected Configuration conf;
    protected CacheConfig cacheConf;

    WriterFactory(Configuration conf) { this.conf = conf; }
    WriterFactory(Configuration conf, CacheConfig cacheConf) {
      this.conf = conf;
      this.cacheConf = cacheConf;
    }

    public abstract Writer createWriter(FileSystem fs, Path path)
        throws IOException;

@@ -236,33 +240,29 @@
   * can also be {@link HFileWriterV1#WRITER_FACTORY_V1} in testing.
   */
  public static final WriterFactory getWriterFactory(Configuration conf) {
    return HFile.getWriterFactory(conf, new CacheConfig(conf));
  }

  /**
   * Returns the factory to be used to create {@link HFile} writers. Should
   * always be {@link HFileWriterV2#WRITER_FACTORY_V2} in production, but
   * can also be {@link HFileWriterV1#WRITER_FACTORY_V1} in testing.
   */
  public static final WriterFactory getWriterFactory(Configuration conf,
      CacheConfig cacheConf) {
    int version = getFormatVersion(conf);
    LOG.debug("Using HFile format version " + version);
    switch (version) {
    case 1:
      return new HFileWriterV1.WriterFactoryV1(conf);
      return new HFileWriterV1.WriterFactoryV1(conf, cacheConf);
    case 2:
      return new HFileWriterV2.WriterFactoryV2(conf);
      return new HFileWriterV2.WriterFactoryV2(conf, cacheConf);
    default:
      throw new IllegalArgumentException("Cannot create writer for HFile " +
          "format version " + version);
    }
  }

  /**
   * Configuration key to evict all blocks of a given file from the block cache
   * when the file is closed.
   */
  public static final String EVICT_BLOCKS_ON_CLOSE_KEY =
      "hbase.rs.evictblocksonclose";

  /**
   * Configuration key to cache data blocks on write. There are separate
   * switches for Bloom blocks and non-root index blocks.
   */
  public static final String CACHE_BLOCKS_ON_WRITE_KEY =
      "hbase.rs.cacheblocksonwrite";

  /** An abstraction used by the block index */
  public interface CachingBlockReader {
    HFileBlock readBlock(long offset, long onDiskBlockSize,

@@ -325,35 +325,32 @@ public class HFile {
  }

  private static Reader pickReaderVersion(Path path, FSDataInputStream fsdis,
      long size, boolean closeIStream, BlockCache blockCache,
      boolean inMemory, boolean evictOnClose) throws IOException {
      long size, boolean closeIStream, CacheConfig cacheConf)
      throws IOException {
    FixedFileTrailer trailer = FixedFileTrailer.readFromStream(fsdis, size);
    switch (trailer.getVersion()) {
    case 1:
      return new HFileReaderV1(path, trailer, fsdis, size, closeIStream,
          blockCache, inMemory, evictOnClose);
          cacheConf);
    case 2:
      return new HFileReaderV2(path, trailer, fsdis, size, closeIStream,
          blockCache, inMemory, evictOnClose);
          cacheConf);
    default:
      throw new IOException("Cannot instantiate reader for HFile version " +
          trailer.getVersion());
    }
  }

  public static Reader createReader(
      FileSystem fs, Path path, BlockCache blockCache, boolean inMemory,
      boolean evictOnClose) throws IOException {
  public static Reader createReader(FileSystem fs, Path path,
      CacheConfig cacheConf) throws IOException {
    return pickReaderVersion(path, fs.open(path),
        fs.getFileStatus(path).getLen(), true, blockCache, inMemory,
        evictOnClose);
        fs.getFileStatus(path).getLen(), true, cacheConf);
  }

  public static Reader createReader(Path path, FSDataInputStream fsdis,
      long size, BlockCache blockache, boolean inMemory, boolean evictOnClose)
      long size, CacheConfig cacheConf)
      throws IOException {
    return pickReaderVersion(path, fsdis, size, false, blockache, inMemory,
        evictOnClose);
    return pickReaderVersion(path, fsdis, size, false, cacheConf);
  }

  /*

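For illustration only (not part of this commit): a sketch of the reworked reader/writer entry points, assuming only the signatures shown in the hunks above; the method name and paths are placeholders.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;

class HFileApiSketch {
  static void openAndCreate(Configuration conf, Path src, Path dst) throws IOException {
    FileSystem fs = src.getFileSystem(conf);
    CacheConfig cacheConf = new CacheConfig(conf);

    // Read side: the (blockCache, inMemory, evictOnClose) triple is replaced by a CacheConfig.
    HFile.Reader reader = HFile.createReader(fs, src, cacheConf);
    reader.loadFileInfo();
    reader.close();

    // Write side: the factory now carries the CacheConfig; the conf-only
    // overload builds one internally via new CacheConfig(conf).
    HFile.Writer writer = HFile.getWriterFactory(conf, cacheConf).createWriter(fs, dst);
    writer.close(); // nothing appended here; this sketch only shows wiring
  }
}
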
@@ -185,7 +185,7 @@ public class HFilePrettyPrinter {
      System.err.println("ERROR, file doesnt exist: " + file);
    }

    HFile.Reader reader = HFile.createReader(fs, file, null, false, false);
    HFile.Reader reader = HFile.createReader(fs, file, new CacheConfig(conf));

    Map<byte[], byte[]> fileInfo = reader.loadFileInfo();

@@ -54,18 +54,14 @@ public class HFileReaderV1 extends AbstractHFileReader {
   * @param fsdis input stream. Caller is responsible for closing the passed
   *          stream.
   * @param size Length of the stream.
   * @param blockCache block cache. Pass null if none.
   * @param inMemory whether blocks should be marked as in-memory in cache
   * @param evictOnClose whether blocks in cache should be evicted on close
   * @param cacheConf cache references and configuration
   * @throws IOException
   */
  public HFileReaderV1(Path path, FixedFileTrailer trailer,
      final FSDataInputStream fsdis, final long size,
      final boolean closeIStream,
      final BlockCache blockCache, final boolean inMemory,
      final boolean evictOnClose) {
    super(path, trailer, fsdis, size, closeIStream, blockCache, inMemory,
        evictOnClose);
      final CacheConfig cacheConf) {
    super(path, trailer, fsdis, size, closeIStream, cacheConf);

    trailer.expectVersion(1);
    fsBlockReader = new HFileBlock.FSReaderV1(fsdis, compressAlgo, fileSize);

@@ -221,9 +217,10 @@ public class HFileReaderV1 extends AbstractHFileReader {
    synchronized (metaBlockIndexReader.getRootBlockKey(block)) {
      metaLoads.incrementAndGet();
      // Check cache for block. If found return.
      if (blockCache != null) {
        HFileBlock cachedBlock = (HFileBlock) blockCache.getBlock(cacheKey,
            true);
      if (cacheConf.isBlockCacheEnabled()) {
        HFileBlock cachedBlock =
            (HFileBlock) cacheConf.getBlockCache().getBlock(cacheKey,
                cacheConf.shouldCacheDataOnRead());
        if (cachedBlock != null) {
          cacheHits.incrementAndGet();
          return cachedBlock.getBufferWithoutHeader();

@@ -240,8 +237,9 @@ public class HFileReaderV1 extends AbstractHFileReader {
      HFile.readOps.incrementAndGet();

      // Cache the block
      if (cacheBlock && blockCache != null) {
        blockCache.cacheBlock(cacheKey, hfileBlock, inMemory);
      if (cacheConf.shouldCacheDataOnRead() && cacheBlock) {
        cacheConf.getBlockCache().cacheBlock(cacheKey, hfileBlock,
            cacheConf.isInMemory());
      }

      return hfileBlock.getBufferWithoutHeader();

@@ -279,9 +277,10 @@ public class HFileReaderV1 extends AbstractHFileReader {
      blockLoads.incrementAndGet();

      // Check cache for block. If found return.
      if (blockCache != null) {
        HFileBlock cachedBlock = (HFileBlock) blockCache.getBlock(cacheKey,
            true);
      if (cacheConf.isBlockCacheEnabled()) {
        HFileBlock cachedBlock =
            (HFileBlock) cacheConf.getBlockCache().getBlock(cacheKey,
                cacheConf.shouldCacheDataOnRead());
        if (cachedBlock != null) {
          cacheHits.incrementAndGet();
          return cachedBlock.getBufferWithoutHeader();

@@ -312,8 +311,9 @@ public class HFileReaderV1 extends AbstractHFileReader {
      HFile.readOps.incrementAndGet();

      // Cache the block
      if (cacheBlock && blockCache != null) {
        blockCache.cacheBlock(cacheKey, hfileBlock, inMemory);
      if (cacheConf.shouldCacheDataOnRead() && cacheBlock) {
        cacheConf.getBlockCache().cacheBlock(cacheKey, hfileBlock,
            cacheConf.isInMemory());
      }

      return buf;

@@ -348,10 +348,10 @@ public class HFileReaderV1 extends AbstractHFileReader {

  @Override
  public void close() throws IOException {
    if (evictOnClose && this.blockCache != null) {
    if (cacheConf.shouldEvictOnClose()) {
      int numEvicted = 0;
      for (int i = 0; i < dataBlockIndexReader.getRootBlockCount(); i++) {
        if (blockCache.evictBlock(HFile.getBlockCacheKey(name,
        if (cacheConf.getBlockCache().evictBlock(HFile.getBlockCacheKey(name,
            dataBlockIndexReader.getRootBlockOffset(i))))
          numEvicted++;
      }

@@ -65,20 +65,20 @@ public class HFileReaderV2 extends AbstractHFileReader {
   * Opens a HFile. You must load the index before you can use it by calling
   * {@link #loadFileInfo()}.
   *
   * @param path Path to HFile.
   * @param trailer File trailer.
   * @param fsdis input stream. Caller is responsible for closing the passed
   *          stream.
   * @param size Length of the stream.
   * @param blockCache block cache. Pass null if none.
   * @param inMemory whether blocks should be marked as in-memory in cache
   * @param evictOnClose whether blocks in cache should be evicted on close
   * @param closeIStream Whether to close the stream.
   * @param cacheConf Cache configuration.
   * @throws IOException
   */
  public HFileReaderV2(Path path, FixedFileTrailer trailer,
      final FSDataInputStream fsdis, final long size,
      final boolean closeIStream, final BlockCache blockCache,
      final boolean inMemory, final boolean evictOnClose) throws IOException {
    super(path, trailer, fsdis, size, closeIStream, blockCache, inMemory,
        evictOnClose);
      final boolean closeIStream, final CacheConfig cacheConf)
      throws IOException {
    super(path, trailer, fsdis, size, closeIStream, cacheConf);

    trailer.expectVersion(2);
    fsBlockReader = new HFileBlock.FSReaderV2(fsdis, compressAlgo,

@@ -174,9 +174,10 @@ public class HFileReaderV2 extends AbstractHFileReader {
      long metaBlockOffset = metaBlockIndexReader.getRootBlockOffset(block);
      String cacheKey = HFile.getBlockCacheKey(name, metaBlockOffset);

      if (blockCache != null) {
        HFileBlock cachedBlock = (HFileBlock) blockCache.getBlock(cacheKey,
            true);
      cacheBlock &= cacheConf.shouldCacheDataOnRead();
      if (cacheConf.isBlockCacheEnabled()) {
        HFileBlock cachedBlock =
            (HFileBlock) cacheConf.getBlockCache().getBlock(cacheKey, cacheBlock);
        if (cachedBlock != null) {
          // Return a distinct 'shallow copy' of the block,
          // so pos does not get messed by the scanner

@@ -193,8 +194,9 @@ public class HFileReaderV2 extends AbstractHFileReader {
      HFile.readOps.incrementAndGet();

      // Cache the block
      if (cacheBlock && blockCache != null) {
        blockCache.cacheBlock(cacheKey, metaBlock, inMemory);
      if (cacheBlock) {
        cacheConf.getBlockCache().cacheBlock(cacheKey, metaBlock,
            cacheConf.isInMemory());
      }

      return metaBlock.getBufferWithoutHeader();

@@ -237,9 +239,10 @@ public class HFileReaderV2 extends AbstractHFileReader {
      blockLoads.incrementAndGet();

      // Check cache for block. If found return.
      if (blockCache != null) {
        HFileBlock cachedBlock = (HFileBlock) blockCache.getBlock(cacheKey,
            true);
      cacheBlock &= cacheConf.shouldCacheDataOnRead();
      if (cacheConf.isBlockCacheEnabled()) {
        HFileBlock cachedBlock =
            (HFileBlock) cacheConf.getBlockCache().getBlock(cacheKey, cacheBlock);
        if (cachedBlock != null) {
          cacheHits.incrementAndGet();

@@ -257,8 +260,9 @@ public class HFileReaderV2 extends AbstractHFileReader {
      HFile.readOps.incrementAndGet();

      // Cache the block
      if (cacheBlock && blockCache != null) {
        blockCache.cacheBlock(cacheKey, dataBlock, inMemory);
      if (cacheBlock) {
        cacheConf.getBlockCache().cacheBlock(cacheKey, dataBlock,
            cacheConf.isInMemory());
      }

      return dataBlock;

@@ -289,8 +293,8 @@ public class HFileReaderV2 extends AbstractHFileReader {

  @Override
  public void close() throws IOException {
    if (evictOnClose && blockCache != null) {
      int numEvicted = blockCache.evictBlocksByPrefix(name
    if (cacheConf.shouldEvictOnClose()) {
      int numEvicted = cacheConf.getBlockCache().evictBlocksByPrefix(name
          + HFile.CACHE_KEY_SEPARATOR);
      LOG.debug("On close of file " + name + " evicted " + numEvicted
          + " block(s)");

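For illustration only (not part of this commit): both reader versions now funnel the same check-cache / load / back-fill decision through CacheConfig. A condensed sketch of that pattern follows, with the disk read abstracted behind a made-up DiskLoader callback and Cacheable assumed as the value type returned by BlockCache.getBlock at this revision.

import java.io.IOException;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.Cacheable;

final class ReadPathSketch {
  interface DiskLoader { Cacheable load() throws IOException; }

  // Mirrors the hunks above: the per-request cacheBlock flag is ANDed with the
  // CacheConfig policy, the cache is consulted only when one is configured,
  // and a miss is optionally back-filled into the cache.
  static Cacheable read(CacheConfig cacheConf, String cacheKey, boolean cacheBlock,
      DiskLoader disk) throws IOException {
    cacheBlock &= cacheConf.shouldCacheDataOnRead();
    if (cacheConf.isBlockCacheEnabled()) {
      Cacheable cached = cacheConf.getBlockCache().getBlock(cacheKey, cacheBlock);
      if (cached != null) {
        return cached;
      }
    }
    Cacheable block = disk.load();
    if (cacheBlock) {
      cacheConf.getBlockCache().cacheBlock(cacheKey, block, cacheConf.isInMemory());
    }
    return block;
  }
}
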
@@ -79,18 +79,20 @@ public class HFileWriterV1 extends AbstractHFileWriter {

  static class WriterFactoryV1 extends HFile.WriterFactory {

    WriterFactoryV1(Configuration conf) { super(conf); }
    WriterFactoryV1(Configuration conf, CacheConfig cacheConf) {
      super(conf, cacheConf);
    }

    @Override
    public Writer createWriter(FileSystem fs, Path path) throws IOException {
      return new HFileWriterV1(conf, fs, path);
      return new HFileWriterV1(conf, cacheConf, fs, path);
    }

    @Override
    public Writer createWriter(FileSystem fs, Path path, int blockSize,
        Compression.Algorithm compressAlgo, final KeyComparator comparator)
        throws IOException {
      return new HFileWriterV1(conf, fs, path, blockSize,
      return new HFileWriterV1(conf, cacheConf, fs, path, blockSize,
          compressAlgo, comparator);
    }

@@ -98,7 +100,7 @@ public class HFileWriterV1 extends AbstractHFileWriter {
    public Writer createWriter(FileSystem fs, Path path, int blockSize,
        String compressAlgoName,
        final KeyComparator comparator) throws IOException {
      return new HFileWriterV1(conf, fs, path, blockSize,
      return new HFileWriterV1(conf, cacheConf, fs, path, blockSize,
          compressAlgoName, comparator);
    }

@@ -106,21 +108,23 @@ public class HFileWriterV1 extends AbstractHFileWriter {
    public Writer createWriter(final FSDataOutputStream ostream,
        final int blockSize, final String compress,
        final KeyComparator comparator) throws IOException {
      return new HFileWriterV1(conf, ostream, blockSize, compress, comparator);
      return new HFileWriterV1(cacheConf, ostream, blockSize, compress,
          comparator);
    }

    @Override
    public Writer createWriter(final FSDataOutputStream ostream,
        final int blockSize, final Compression.Algorithm compress,
        final KeyComparator c) throws IOException {
      return new HFileWriterV1(conf, ostream, blockSize, compress, c);
      return new HFileWriterV1(cacheConf, ostream, blockSize, compress, c);
    }
  }

  /** Constructor that uses all defaults for compression and block size. */
  public HFileWriterV1(Configuration conf, FileSystem fs, Path path)
  public HFileWriterV1(Configuration conf, CacheConfig cacheConf,
      FileSystem fs, Path path)
      throws IOException {
    this(conf, fs, path, HFile.DEFAULT_BLOCKSIZE,
    this(conf, cacheConf, fs, path, HFile.DEFAULT_BLOCKSIZE,
        HFile.DEFAULT_COMPRESSION_ALGORITHM,
        null);
  }

@@ -129,37 +133,37 @@ public class HFileWriterV1 extends AbstractHFileWriter {
   * Constructor that takes a path, creates and closes the output stream. Takes
   * compression algorithm name as string.
   */
  public HFileWriterV1(Configuration conf, FileSystem fs, Path path,
      int blockSize, String compressAlgoName,
  public HFileWriterV1(Configuration conf, CacheConfig cacheConf, FileSystem fs,
      Path path, int blockSize, String compressAlgoName,
      final KeyComparator comparator) throws IOException {
    this(conf, fs, path, blockSize,
    this(conf, cacheConf, fs, path, blockSize,
        compressionByName(compressAlgoName), comparator);
  }

  /** Constructor that takes a path, creates and closes the output stream. */
  public HFileWriterV1(Configuration conf, FileSystem fs, Path path,
      int blockSize, Compression.Algorithm compress,
  public HFileWriterV1(Configuration conf, CacheConfig cacheConf, FileSystem fs,
      Path path, int blockSize, Compression.Algorithm compress,
      final KeyComparator comparator) throws IOException {
    super(conf, createOutputStream(conf, fs, path), path,
    super(cacheConf, createOutputStream(conf, fs, path), path,
        blockSize, compress, comparator);
  }

  /** Constructor that takes a stream. */
  public HFileWriterV1(Configuration conf,
  public HFileWriterV1(CacheConfig cacheConf,
      final FSDataOutputStream outputStream, final int blockSize,
      final String compressAlgoName, final KeyComparator comparator)
      throws IOException {
    this(conf, outputStream, blockSize,
    this(cacheConf, outputStream, blockSize,
        Compression.getCompressionAlgorithmByName(compressAlgoName),
        comparator);
  }

  /** Constructor that takes a stream. */
  public HFileWriterV1(Configuration conf,
  public HFileWriterV1(CacheConfig cacheConf,
      final FSDataOutputStream outputStream, final int blockSize,
      final Compression.Algorithm compress, final KeyComparator comparator)
      throws IOException {
    super(conf, outputStream, null, blockSize, compress, comparator);
    super(cacheConf, outputStream, null, blockSize, compress, comparator);
  }

  /**

@@ -194,10 +198,11 @@ public class HFileWriterV1 extends AbstractHFileWriter {
    HFile.writeTimeNano.addAndGet(System.nanoTime() - startTimeNs);
    HFile.writeOps.incrementAndGet();

    if (cacheDataBlocksOnWrite) {
    if (cacheConf.shouldCacheDataOnWrite()) {
      baosDos.flush();
      byte[] bytes = baos.toByteArray();
      blockCache.cacheBlock(HFile.getBlockCacheKey(name, blockBegin),
      cacheConf.getBlockCache().cacheBlock(
          HFile.getBlockCacheKey(name, blockBegin),
          new HFileBlock(BlockType.DATA,
              (int) (outputStream.getPos() - blockBegin), bytes.length, -1,
              ByteBuffer.wrap(bytes, 0, bytes.length), true, blockBegin));

@@ -217,7 +222,7 @@ public class HFileWriterV1 extends AbstractHFileWriter {
    this.out = getCompressingStream();
    BlockType.DATA.write(out);
    firstKeyInBlock = null;
    if (cacheDataBlocksOnWrite) {
    if (cacheConf.shouldCacheDataOnWrite()) {
      this.baos = new ByteArrayOutputStream();
      this.baosDos = new DataOutputStream(baos);
      baosDos.write(HFileBlock.DUMMY_HEADER);

@@ -361,7 +366,7 @@ public class HFileWriterV1 extends AbstractHFileWriter {
    this.lastKeyLength = klength;
    this.entryCount++;
    // If we are pre-caching blocks on write, fill byte array stream
    if (cacheDataBlocksOnWrite) {
    if (cacheConf.shouldCacheDataOnWrite()) {
      this.baosDos.writeInt(klength);
      this.baosDos.writeInt(vlength);
      this.baosDos.write(key, koffset, klength);

@@ -65,19 +65,21 @@ public class HFileWriterV2 extends AbstractHFileWriter {

  static class WriterFactoryV2 extends HFile.WriterFactory {

    WriterFactoryV2(Configuration conf) { super(conf); }
    WriterFactoryV2(Configuration conf, CacheConfig cacheConf) {
      super(conf, cacheConf);
    }

    @Override
    public Writer createWriter(FileSystem fs, Path path)
        throws IOException {
      return new HFileWriterV2(conf, fs, path);
      return new HFileWriterV2(conf, cacheConf, fs, path);
    }

    @Override
    public Writer createWriter(FileSystem fs, Path path, int blockSize,
        Compression.Algorithm compress,
        final KeyComparator comparator) throws IOException {
      return new HFileWriterV2(conf, fs, path, blockSize,
      return new HFileWriterV2(conf, cacheConf, fs, path, blockSize,
          compress, comparator);
    }

@@ -85,7 +87,7 @@ public class HFileWriterV2 extends AbstractHFileWriter {
    public Writer createWriter(FileSystem fs, Path path, int blockSize,
        String compress, final KeyComparator comparator)
        throws IOException {
      return new HFileWriterV2(conf, fs, path, blockSize,
      return new HFileWriterV2(conf, cacheConf, fs, path, blockSize,
          compress, comparator);
    }

@@ -93,21 +95,24 @@ public class HFileWriterV2 extends AbstractHFileWriter {
    public Writer createWriter(final FSDataOutputStream ostream,
        final int blockSize, final String compress,
        final KeyComparator comparator) throws IOException {
      return new HFileWriterV2(conf, ostream, blockSize, compress, comparator);
      return new HFileWriterV2(conf, cacheConf, ostream, blockSize, compress,
          comparator);
    }

    @Override
    public Writer createWriter(final FSDataOutputStream ostream,
        final int blockSize, final Compression.Algorithm compress,
        final KeyComparator c) throws IOException {
      return new HFileWriterV2(conf, ostream, blockSize, compress, c);
      return new HFileWriterV2(conf, cacheConf, ostream, blockSize, compress,
          c);
    }
  }

  /** Constructor that uses all defaults for compression and block size. */
  public HFileWriterV2(Configuration conf, FileSystem fs, Path path)
  public HFileWriterV2(Configuration conf, CacheConfig cacheConf,
      FileSystem fs, Path path)
      throws IOException {
    this(conf, fs, path, HFile.DEFAULT_BLOCKSIZE,
    this(conf, cacheConf, fs, path, HFile.DEFAULT_BLOCKSIZE,
        HFile.DEFAULT_COMPRESSION_ALGORITHM, null);
  }

@@ -115,38 +120,38 @@ public class HFileWriterV2 extends AbstractHFileWriter {
   * Constructor that takes a path, creates and closes the output stream. Takes
   * compression algorithm name as string.
   */
  public HFileWriterV2(Configuration conf, FileSystem fs, Path path,
      int blockSize, String compressAlgoName,
  public HFileWriterV2(Configuration conf, CacheConfig cacheConf, FileSystem fs,
      Path path, int blockSize, String compressAlgoName,
      final KeyComparator comparator) throws IOException {
    this(conf, fs, path, blockSize,
    this(conf, cacheConf, fs, path, blockSize,
        compressionByName(compressAlgoName), comparator);
  }

  /** Constructor that takes a path, creates and closes the output stream. */
  public HFileWriterV2(Configuration conf, FileSystem fs, Path path,
      int blockSize, Compression.Algorithm compressAlgo,
  public HFileWriterV2(Configuration conf, CacheConfig cacheConf, FileSystem fs,
      Path path, int blockSize, Compression.Algorithm compressAlgo,
      final KeyComparator comparator) throws IOException {
    super(conf, createOutputStream(conf, fs, path), path,
    super(cacheConf, createOutputStream(conf, fs, path), path,
        blockSize, compressAlgo, comparator);
    finishInit(conf);
  }

  /** Constructor that takes a stream. */
  public HFileWriterV2(final Configuration conf,
  public HFileWriterV2(final Configuration conf, final CacheConfig cacheConf,
      final FSDataOutputStream outputStream, final int blockSize,
      final String compressAlgoName, final KeyComparator comparator)
      throws IOException {
    this(conf, outputStream, blockSize,
    this(conf, cacheConf, outputStream, blockSize,
        Compression.getCompressionAlgorithmByName(compressAlgoName),
        comparator);
  }

  /** Constructor that takes a stream. */
  public HFileWriterV2(final Configuration conf,
  public HFileWriterV2(final Configuration conf, final CacheConfig cacheConf,
      final FSDataOutputStream outputStream, final int blockSize,
      final Compression.Algorithm compress, final KeyComparator comparator)
      throws IOException {
    super(conf, outputStream, null, blockSize, compress, comparator);
    super(cacheConf, outputStream, null, blockSize, compress, comparator);
    finishInit(conf);
  }

@@ -159,9 +164,10 @@ public class HFileWriterV2 extends AbstractHFileWriter {
    fsBlockWriter = new HFileBlock.Writer(compressAlgo);

    // Data block index writer
    boolean cacheIndexesOnWrite = cacheConf.shouldCacheIndexesOnWrite();
    dataBlockIndexWriter = new HFileBlockIndex.BlockIndexWriter(fsBlockWriter,
        cacheIndexBlocksOnWrite ? blockCache : null,
        cacheIndexBlocksOnWrite ? name : null);
        cacheIndexesOnWrite ? cacheConf.getBlockCache(): null,
        cacheIndexesOnWrite ? name : null);
    dataBlockIndexWriter.setMaxChunkSize(
        HFileBlockIndex.getMaxChunkSize(conf));
    inlineBlockWriters.add(dataBlockIndexWriter);

@@ -208,8 +214,9 @@ public class HFileWriterV2 extends AbstractHFileWriter {
    HFile.writeTimeNano.addAndGet(System.nanoTime() - startTimeNs);
    HFile.writeOps.incrementAndGet();

    if (cacheDataBlocksOnWrite) {
      blockCache.cacheBlock(HFile.getBlockCacheKey(name, lastDataBlockOffset),
    if (cacheConf.shouldCacheDataOnWrite()) {
      cacheConf.getBlockCache().cacheBlock(
          HFile.getBlockCacheKey(name, lastDataBlockOffset),
          fsBlockWriter.getBlockForCaching());
    }
  }

@@ -228,7 +235,8 @@ public class HFileWriterV2 extends AbstractHFileWriter {

    if (cacheThisBlock) {
      // Cache this block on write.
      blockCache.cacheBlock(HFile.getBlockCacheKey(name, offset),
      cacheConf.getBlockCache().cacheBlock(
          HFile.getBlockCacheKey(name, offset),
          fsBlockWriter.getBlockForCaching());
    }
  }

@@ -242,7 +250,8 @@ public class HFileWriterV2 extends AbstractHFileWriter {
   */
  private void newBlock() throws IOException {
    // This is where the next block begins.
    fsBlockWriter.startWriting(BlockType.DATA, cacheDataBlocksOnWrite);
    fsBlockWriter.startWriting(BlockType.DATA,
        cacheConf.shouldCacheDataOnWrite());
    firstKeyInBlock = null;
  }

@@ -370,7 +379,7 @@ public class HFileWriterV2 extends AbstractHFileWriter {
      long offset = outputStream.getPos();
      // write the metadata content
      DataOutputStream dos = fsBlockWriter.startWriting(BlockType.META,
          cacheDataBlocksOnWrite);
          cacheConf.shouldCacheDataOnWrite());
      metaData.get(i).write(dos);

      fsBlockWriter.writeHeaderAndData(outputStream);

@@ -424,8 +433,6 @@ public class HFileWriterV2 extends AbstractHFileWriter {
  @Override
  public void addInlineBlockWriter(InlineBlockWriter ibw) {
    inlineBlockWriters.add(ibw);
    if (blockCache == null && ibw.cacheOnWrite())
      initBlockCache();
  }

  @Override

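For illustration only (not part of this commit): cache-on-write for data, index, and Bloom blocks is now switched on through the keys consolidated in CacheConfig rather than per-writer booleans. A minimal sketch, using only the keys and predicates defined in CacheConfig above; the class name is made up.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;

public class CacheOnWriteConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, true);       // data blocks
    conf.setBoolean(CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY, true); // non-root index blocks
    conf.setBoolean(CacheConfig.CACHE_BLOOM_BLOCKS_ON_WRITE_KEY, true); // compound Bloom blocks

    CacheConfig cacheConf = new CacheConfig(conf);
    // The writers above consult these predicates instead of the old per-writer flags.
    System.out.println("data on write:   " + cacheConf.shouldCacheDataOnWrite());
    System.out.println("index on write:  " + cacheConf.shouldCacheIndexesOnWrite());
    System.out.println("blooms on write: " + cacheConf.shouldCacheBloomsOnWrite());
  }
}
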
@@ -40,7 +40,6 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.HeapSize;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ClassSize;

@@ -525,6 +524,11 @@ public class LruBlockCache implements BlockCache, HeapSize {
    return this.elements.get();
  }

  @Override
  public long getBlockCount() {
    return this.elements.get();
  }

  /**
   * Get the number of eviction runs that have occurred
   */

@@ -624,7 +628,7 @@ public class LruBlockCache implements BlockCache, HeapSize {
  public CacheStats getStats() {
    return this.stats;
  }

  public final static long CACHE_FIXED_OVERHEAD = ClassSize.align(
      (3 * Bytes.SIZEOF_LONG) + (8 * ClassSize.REFERENCE) +
      (5 * Bytes.SIZEOF_FLOAT) + Bytes.SIZEOF_BOOLEAN

@@ -645,18 +649,18 @@ public class LruBlockCache implements BlockCache, HeapSize {

  @Override
  public List<BlockCacheColumnFamilySummary> getBlockCacheColumnFamilySummaries(Configuration conf) throws IOException {

    Map<String, Path> sfMap = FSUtils.getTableStoreFilePathMap(
        FileSystem.get(conf),
        FSUtils.getRootDir(conf));

    // quirky, but it's a compound key and this is a shortcut taken instead of
    // quirky, but it's a compound key and this is a shortcut taken instead of
    // creating a class that would represent only a key.
    Map<BlockCacheColumnFamilySummary, BlockCacheColumnFamilySummary> bcs =
    Map<BlockCacheColumnFamilySummary, BlockCacheColumnFamilySummary> bcs =
        new HashMap<BlockCacheColumnFamilySummary, BlockCacheColumnFamilySummary>();

    final String pattern = "\\" + HFile.CACHE_KEY_SEPARATOR;

    for (CachedBlock cb : map.values()) {
      // split name and get the first part (e.g., "8351478435190657655_0")
      // see HFile.getBlockCacheKey for structure of block cache key.

@@ -665,7 +669,7 @@ public class LruBlockCache implements BlockCache, HeapSize {
      String sf = s[0];
      Path path = sfMap.get(sf);
      if ( path != null) {
        BlockCacheColumnFamilySummary lookup =
        BlockCacheColumnFamilySummary lookup =
            BlockCacheColumnFamilySummary.createFromStoreFilePath(path);
        BlockCacheColumnFamilySummary bcse = bcs.get(lookup);
        if (bcse == null) {

@@ -677,12 +681,12 @@ public class LruBlockCache implements BlockCache, HeapSize {
        }
      }
    }
    List<BlockCacheColumnFamilySummary> list =
    List<BlockCacheColumnFamilySummary> list =
        new ArrayList<BlockCacheColumnFamilySummary>(bcs.values());
    Collections.sort( list );
    Collections.sort( list );
    return list;
  }

  // Simple calculators of sizes given factors and maxSize

  private long acceptableSize() {

@@ -128,5 +128,11 @@ public class SimpleBlockCache implements BlockCache {
    throw new UnsupportedOperationException();
  }

  @Override
  public long getBlockCount() {
    // TODO: implement this if we ever actually use this block cache
    return 0;
  }

}

@@ -269,6 +269,11 @@ public class SingleSizeCache implements BlockCache, HeapSize {
    return this.stats;
  }

  @Override
  public long getBlockCount() {
    return numBlocks - backingStore.getBlocksRemaining();
  }

  /* Since its offheap, it doesn't matter if its in memory or not */
  @Override
  public void cacheBlock(String blockName, Cacheable buf, boolean inMemory) {

@@ -375,6 +375,15 @@ public class SlabCache implements SlabItemEvictionWatcher, BlockCache, HeapSize
    return 0; // this cache, by default, allocates all its space.
  }

  @Override
  public long getBlockCount() {
    long count = 0;
    for (SingleSizeCache cache : backingStore.values()) {
      count += cache.getBlockCount();
    }
    return count;
  }

  public long getCurrentSize() {
    return size;
  }

@@ -45,6 +45,7 @@ import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.Compression;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.regionserver.StoreFile;

@@ -169,7 +170,8 @@ public class HFileOutputFormat extends FileOutputFormat<ImmutableBytesWritable,
      Path familydir = new Path(outputdir, Bytes.toString(family));
      String compression = compressionMap.get(family);
      compression = compression == null ? defaultCompression : compression;
      wl.writer = HFile.getWriterFactory(conf).createWriter(fs,
      wl.writer =
          HFile.getWriterFactory(conf).createWriter(fs,
          StoreFile.getUniqueFile(fs, familydir), blocksize,
          compression, KeyValue.KEY_COMPARATOR);
      this.writers.put(family, wl);

@ -61,6 +61,7 @@ import org.apache.hadoop.hbase.client.ServerCallable;
|
|||
import org.apache.hadoop.hbase.io.HalfStoreFileReader;
|
||||
import org.apache.hadoop.hbase.io.Reference;
|
||||
import org.apache.hadoop.hbase.io.Reference.Range;
|
||||
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
|
||||
import org.apache.hadoop.hbase.io.hfile.HFile;
|
||||
import org.apache.hadoop.hbase.io.hfile.HFileScanner;
|
||||
import org.apache.hadoop.hbase.io.hfile.Compression.Algorithm;
|
||||
|
@ -288,7 +289,8 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
|
|||
throws IOException {
|
||||
final Path hfilePath = item.hfilePath;
|
||||
final FileSystem fs = hfilePath.getFileSystem(getConf());
|
||||
HFile.Reader hfr = HFile.createReader(fs, hfilePath, null, false, false);
|
||||
HFile.Reader hfr = HFile.createReader(fs, hfilePath,
|
||||
new CacheConfig(getConf()));
|
||||
final byte[] first, last;
|
||||
try {
|
||||
hfr.loadFileInfo();
|
||||
|
@ -378,10 +380,12 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
|
|||
HColumnDescriptor familyDescriptor)
|
||||
throws IOException {
|
||||
FileSystem fs = inFile.getFileSystem(conf);
|
||||
CacheConfig cacheConf = new CacheConfig(conf);
|
||||
HalfStoreFileReader halfReader = null;
|
||||
StoreFile.Writer halfWriter = null;
|
||||
try {
|
||||
halfReader = new HalfStoreFileReader(fs, inFile, null, reference);
|
||||
halfReader = new HalfStoreFileReader(fs, inFile, cacheConf,
|
||||
reference);
|
||||
Map<byte[], byte[]> fileInfo = halfReader.loadFileInfo();
|
||||
|
||||
int blocksize = familyDescriptor.getBlocksize();
|
||||
|
@ -389,8 +393,8 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
|
|||
BloomType bloomFilterType = familyDescriptor.getBloomFilterType();
|
||||
|
||||
halfWriter = new StoreFile.Writer(
|
||||
fs, outFile, blocksize, compression, conf, KeyValue.COMPARATOR,
|
||||
bloomFilterType, 0);
|
||||
fs, outFile, blocksize, compression, conf, cacheConf,
|
||||
KeyValue.COMPARATOR, bloomFilterType, 0);
|
||||
HFileScanner scanner = halfReader.getScanner(false, false);
|
||||
scanner.seekTo();
|
||||
do {
|
||||
|
@ -490,7 +494,8 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
|
|||
for (Path hfile : hfiles) {
|
||||
if (hfile.getName().startsWith("_")) continue;
|
||||
|
||||
HFile.Reader reader = HFile.createReader(fs, hfile, null, false, false);
|
||||
HFile.Reader reader = HFile.createReader(fs, hfile,
|
||||
new CacheConfig(getConf()));
|
||||
final byte[] first, last;
|
||||
try {
|
||||
reader.loadFileInfo();
|
||||
|
|
|
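Taken together, the LoadIncrementalHFiles hunks above show the reader-side pattern used throughout this patch: the old five-argument HFile.createReader(fs, path, blockCache, inMemory, evictOnClose) call becomes a three-argument call that takes a CacheConfig built from the Configuration. A minimal usage sketch, not part of the patch, with an illustrative path:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.io.hfile.CacheConfig;
    import org.apache.hadoop.hbase.io.hfile.HFile;

    public class CacheConfigReaderSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        FileSystem fs = FileSystem.get(conf);
        Path hfilePath = new Path("/bulkload/cf/hfile");  // illustrative path only
        // The CacheConfig argument replaces the old (blockCache, inMemory,
        // evictOnClose) trio; caching behavior now comes from configuration.
        HFile.Reader reader = HFile.createReader(fs, hfilePath, new CacheConfig(conf));
        try {
          reader.loadFileInfo();  // load trailer and file info before use
        } finally {
          reader.close();
        }
      }
    }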
@@ -85,6 +85,7 @@ import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.io.HeapSize;
import org.apache.hadoop.hbase.io.TimeRange;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.ipc.CoprocessorProtocol;
import org.apache.hadoop.hbase.ipc.HBaseRPC;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;

@@ -4098,7 +4099,8 @@ public class HRegion implements HeapSize { // , Writable{
processTable(fs, tableDir, log, c, majorCompact);
} finally {
log.close();
BlockCache bc = StoreFile.getBlockCache(c);
// TODO: is this still right?
BlockCache bc = new CacheConfig(c).getBlockCache();
if (bc != null) bc.shutdown();
}
}

@@ -59,7 +59,6 @@ import org.apache.hadoop.hbase.ClockOutOfSyncException;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HServerAddress;

@@ -76,6 +75,7 @@ import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.UnknownRowLockException;
import org.apache.hadoop.hbase.UnknownScannerException;
import org.apache.hadoop.hbase.YouAreDeadException;
import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
import org.apache.hadoop.hbase.catalog.CatalogTracker;
import org.apache.hadoop.hbase.catalog.MetaEditor;
import org.apache.hadoop.hbase.catalog.RootLocationEditor;

@@ -97,10 +97,11 @@ import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.executor.ExecutorService;
import org.apache.hadoop.hbase.executor.ExecutorService.ExecutorType;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.filter.WritableByteArrayComparable;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.BlockCacheColumnFamilySummary;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.CacheStats;
import org.apache.hadoop.hbase.ipc.CoprocessorProtocol;
import org.apache.hadoop.hbase.ipc.HBaseRPC;

@@ -291,6 +292,9 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler,

private final RegionServerAccounting regionServerAccounting;

// Cache configuration and block cache reference
private final CacheConfig cacheConfig;

/**
* The server name the Master sees us as. Its made from the hostname the
* master passes us, port, and server startcode. Gets set after registration

@@ -386,6 +390,7 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler,
User.login(this.conf, "hbase.regionserver.keytab.file",
"hbase.regionserver.kerberos.principal", this.isa.getHostName());
regionServerAccounting = new RegionServerAccounting();
cacheConfig = new CacheConfig(conf);
}

/**

@@ -687,9 +692,8 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler,
}
}
// Send cache a shutdown.
BlockCache c = StoreFile.getBlockCache(this.conf);
if (c != null) {
c.shutdown();
if (cacheConfig.isBlockCacheEnabled()) {
cacheConfig.getBlockCache().shutdown();
}

// Send interrupts to wake up threads if sleeping so they notice shutdown.

@@ -1277,7 +1281,7 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler,
this.metrics.readRequestsCount.set(readRequestsCount);
this.metrics.writeRequestsCount.set(writeRequestsCount);

BlockCache blockCache = StoreFile.getBlockCache(conf);
BlockCache blockCache = cacheConfig.getBlockCache();
if (blockCache != null) {
this.metrics.blockCacheCount.set(blockCache.size());
this.metrics.blockCacheFree.set(blockCache.getFreeSize());

@@ -3230,7 +3234,7 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler,

@Override
public List<BlockCacheColumnFamilySummary> getBlockCacheColumnFamilySummaries() throws IOException {
BlockCache c = StoreFile.getBlockCache(this.conf);
BlockCache c = new CacheConfig(this.conf).getBlockCache();
return c.getBlockCacheColumnFamilySummaries(this.conf);
}
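For reference, a small sketch of the shutdown idiom the HRegionServer hunks introduce, where the block cache is obtained from the server's CacheConfig rather than the removed static StoreFile.getBlockCache(conf); the class name is illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.io.hfile.BlockCache;
    import org.apache.hadoop.hbase.io.hfile.CacheConfig;

    public class BlockCacheShutdownSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        CacheConfig cacheConfig = new CacheConfig(conf);
        // Only touch the cache if one is configured at all.
        if (cacheConfig.isBlockCacheEnabled()) {
          BlockCache cache = cacheConfig.getBlockCache();
          cache.shutdown();
        }
      }
    }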
@@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.RemoteExceptionHandler;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.HeapSize;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.Compression;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileScanner;

@@ -52,8 +53,8 @@ import org.apache.hadoop.hbase.regionserver.compactions.CompactionProgress;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ClassSize;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.CollectionBackedScanner;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.util.StringUtils;

import com.google.common.base.Preconditions;

@@ -93,6 +94,7 @@ public class Store implements HeapSize {
private final HColumnDescriptor family;
final FileSystem fs;
final Configuration conf;
final CacheConfig cacheConf;
// ttl in milliseconds.
protected long ttl;
protected int minVersions;

@@ -115,7 +117,6 @@ public class Store implements HeapSize {
private final Object flushLock = new Object();
final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
private final String storeNameStr;
private final boolean inMemory;
private CompactionProgress progress;
private final int compactionKVMax;

@@ -195,8 +196,8 @@ public class Store implements HeapSize {
conf.getInt("hbase.hstore.compaction.min",
/*old name*/ conf.getInt("hbase.hstore.compactionThreshold", 3)));

// Check if this is in-memory store
this.inMemory = family.isInMemory();
// Setting up cache configuration for this family
this.cacheConf = new CacheConfig(conf, family);
this.blockingStoreFileCount =
conf.getInt("hbase.hstore.blockingStoreFiles", 7);

@@ -270,8 +271,8 @@ public class Store implements HeapSize {
}
StoreFile curfile = null;
try {
curfile = new StoreFile(fs, p, blockcache, this.conf,
this.family.getBloomFilterType(), this.inMemory);
curfile = new StoreFile(fs, p, this.conf, this.cacheConf,
this.family.getBloomFilterType());
curfile.createReader();
} catch (IOException ioe) {
LOG.warn("Failed open of " + p + "; presumption is that file was " +

@@ -335,7 +336,7 @@ public class Store implements HeapSize {
LOG.info("Validating hfile at " + srcPath + " for inclusion in "
+ "store " + this + " region " + this.region);
reader = HFile.createReader(srcPath.getFileSystem(conf),
srcPath, null, false, false);
srcPath, cacheConf);
reader.loadFileInfo();

byte[] firstKey = reader.getFirstRowKey();

@@ -375,8 +376,8 @@ public class Store implements HeapSize {
LOG.info("Renaming bulk load file " + srcPath + " to " + dstPath);
StoreFile.rename(fs, srcPath, dstPath);

StoreFile sf = new StoreFile(fs, dstPath, blockcache,
this.conf, this.family.getBloomFilterType(), this.inMemory);
StoreFile sf = new StoreFile(fs, dstPath, this.conf, this.cacheConf,
this.family.getBloomFilterType());
sf.createReader();

LOG.info("Moved hfile " + srcPath + " into store directory " +

@@ -530,8 +531,8 @@ public class Store implements HeapSize {
}

status.setStatus("Flushing " + this + ": reopening flushed file");
StoreFile sf = new StoreFile(this.fs, dstPath, blockcache,
this.conf, this.family.getBloomFilterType(), this.inMemory);
StoreFile sf = new StoreFile(this.fs, dstPath, this.conf, this.cacheConf,
this.family.getBloomFilterType());
StoreFile.Reader r = sf.createReader();
this.storeSize += r.length();
this.totalUncompressedBytes += r.getTotalUncompressedBytes();

@@ -562,7 +563,7 @@ public class Store implements HeapSize {
Compression.Algorithm compression)
throws IOException {
return StoreFile.createWriter(this.fs, region.getTmpDir(), this.blocksize,
compression, this.comparator, this.conf,
compression, this.comparator, this.conf, this.cacheConf,
this.family.getBloomFilterType(), maxKeyCount);
}

@@ -1227,8 +1228,8 @@ public class Store implements HeapSize {
LOG.error("Failed move of compacted file " + compactedFile.getPath(), e);
return null;
}
result = new StoreFile(this.fs, p, blockcache, this.conf,
this.family.getBloomFilterType(), this.inMemory);
result = new StoreFile(this.fs, p, this.conf, this.cacheConf,
this.family.getBloomFilterType());
result.createReader();
}
this.lock.writeLock().lock();

@@ -1790,9 +1791,9 @@ public class Store implements HeapSize {
}

public static final long FIXED_OVERHEAD = ClassSize.align(
ClassSize.OBJECT + (16 * ClassSize.REFERENCE) +
ClassSize.OBJECT + (17 * ClassSize.REFERENCE) +
(7 * Bytes.SIZEOF_LONG) + (1 * Bytes.SIZEOF_DOUBLE) +
(6 * Bytes.SIZEOF_INT) + (3 * Bytes.SIZEOF_BOOLEAN));
(6 * Bytes.SIZEOF_INT) + (2 * Bytes.SIZEOF_BOOLEAN));

public static final long DEEP_OVERHEAD = ClassSize.align(FIXED_OVERHEAD +
ClassSize.OBJECT + ClassSize.REENTRANT_LOCK +
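A hedged sketch of the wiring the Store hunks describe: the column family descriptor now feeds a per-family CacheConfig, which travels with each StoreFile in place of the old blockcache and inMemory booleans. The sketch lives in the regionserver package because the StoreFile constructor is package-private, and the family name and path are illustrative only:

    package org.apache.hadoop.hbase.regionserver;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.io.hfile.CacheConfig;

    public class StoreCacheConfSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        FileSystem fs = FileSystem.get(conf);
        HColumnDescriptor family = new HColumnDescriptor("cf1");  // illustrative family
        family.setInMemory(true);  // the in-memory flag now reaches the cache via CacheConfig
        CacheConfig cacheConf = new CacheConfig(conf, family);
        Path p = new Path("/hbase/t1/region/cf1/storefile");      // illustrative path
        StoreFile sf = new StoreFile(fs, p, conf, cacheConf,
            family.getBloomFilterType());
        sf.createReader();
      }
    }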
@@ -22,8 +22,6 @@ package org.apache.hadoop.hbase.regionserver;
import java.io.DataInput;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.lang.management.MemoryUsage;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.Collection;

@@ -49,22 +47,19 @@ import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.HalfStoreFileReader;
import org.apache.hadoop.hbase.io.Reference;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.Compression;
import org.apache.hadoop.hbase.io.hfile.DoubleBlockCache;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileScanner;
import org.apache.hadoop.hbase.io.hfile.HFileWriterV1;
import org.apache.hadoop.hbase.io.hfile.LruBlockCache;
import org.apache.hadoop.hbase.util.BloomFilter;
import org.apache.hadoop.hbase.util.BloomFilterFactory;
import org.apache.hadoop.hbase.util.BloomFilterWriter;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.DirectMemoryUtils;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.io.RawComparator;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.util.StringUtils;

import com.google.common.base.Function;
import com.google.common.collect.ImmutableList;

@@ -140,11 +135,8 @@ public class StoreFile {
// If this StoreFile references another, this is the other files path.
private Path referencePath;

// Should the block cache be used or not.
private boolean blockcache;

// Is this from an in-memory store
private boolean inMemory;
// Block cache configuration and reference.
private final CacheConfig cacheConf;

// HDFS blocks distribuion information
private HDFSBlocksDistribution hdfsBlocksDistribution;

@@ -200,6 +192,7 @@ public class StoreFile {
* @param p The path of the file.
* @param blockcache <code>true</code> if the block cache is enabled.
* @param conf The current configuration.
* @param cacheConf The cache configuration and block cache reference.
* @param cfBloomType The bloom type to use for this store file as specified
* by column family configuration. This may or may not be the same
* as the Bloom filter type actually present in the HFile, because

@@ -209,16 +202,14 @@ public class StoreFile {
*/
StoreFile(final FileSystem fs,
final Path p,
final boolean blockcache,
final Configuration conf,
final BloomType cfBloomType,
final boolean inMemory)
final CacheConfig cacheConf,
final BloomType cfBloomType)
throws IOException {
this.conf = conf;
this.fs = fs;
this.path = p;
this.blockcache = blockcache;
this.inMemory = inMemory;
this.cacheConf = cacheConf;
if (isReference(p)) {
this.reference = Reference.read(fs, p);
this.referencePath = getReferredToFile(this.path);

@@ -365,49 +356,6 @@ public class StoreFile {
return Bytes.toLong(metadataMap.get(BULKLOAD_TIME_KEY));
}

/**
* Returns the block cache or <code>null</code> in case none should be used.
*
* @param conf The current configuration.
* @return The block cache or <code>null</code>.
*/
public static synchronized BlockCache getBlockCache(Configuration conf) {
if (hfileBlockCache != null) return hfileBlockCache;

float cachePercentage = conf.getFloat(HFILE_BLOCK_CACHE_SIZE_KEY, 0.2f);
// There should be a better way to optimize this. But oh well.
if (cachePercentage == 0L) return null;
if (cachePercentage > 1.0) {
throw new IllegalArgumentException(HFILE_BLOCK_CACHE_SIZE_KEY +
" must be between 0.0 and 1.0, not > 1.0");
}

// Calculate the amount of heap to give the heap.
MemoryUsage mu = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
long cacheSize = (long)(mu.getMax() * cachePercentage);
int blockSize = conf.getInt("hbase.offheapcache.minblocksize", HFile.DEFAULT_BLOCKSIZE);
long offHeapCacheSize = (long) (conf.getFloat("hbase.offheapcache.percentage", (float) 0.95) * DirectMemoryUtils.getDirectMemorySize());
boolean enableOffHeapCache = conf.getBoolean("hbase.offheapcache.enable", false);
LOG.info("Allocating LruBlockCache with maximum size " +
StringUtils.humanReadableInt(cacheSize));
if(offHeapCacheSize <= 0 || !enableOffHeapCache) {
hfileBlockCache = new LruBlockCache(cacheSize, DEFAULT_BLOCKSIZE_SMALL);
} else {
LOG.info("Allocating OffHeapCache with maximum size " +
StringUtils.humanReadableInt(offHeapCacheSize));
hfileBlockCache = new DoubleBlockCache(cacheSize, offHeapCacheSize, DEFAULT_BLOCKSIZE_SMALL, blockSize, conf);
}
return hfileBlockCache;
}

/**
* @return the blockcache
*/
public BlockCache getBlockCache() {
return blockcache ? getBlockCache(conf) : null;
}

/**
* @return the cached value of HDFS blocks distribution. The cached value is
* calculated when store file is opened.

@@ -497,11 +445,9 @@ public class StoreFile {
}
if (isReference()) {
this.reader = new HalfStoreFileReader(this.fs, this.referencePath,
getBlockCache(), this.reference);
this.cacheConf, this.reference);
} else {
this.reader = new Reader(this.fs, this.path, getBlockCache(),
this.inMemory,
this.conf.getBoolean(HFile.EVICT_BLOCKS_ON_CLOSE_KEY, true));
this.reader = new Reader(this.fs, this.path, this.cacheConf);
}

computeHDFSBlockDistribution();

@@ -664,9 +610,10 @@ public class StoreFile {
* @throws IOException
*/
public static Writer createWriter(final FileSystem fs, final Path dir,
final int blocksize, Configuration conf) throws IOException {
return createWriter(fs, dir, blocksize, null, null, conf, BloomType.NONE,
0);
final int blocksize, Configuration conf, CacheConfig cacheConf)
throws IOException {
return createWriter(fs, dir, blocksize, null, null, conf, cacheConf,
BloomType.NONE, 0);
}

/**

@@ -679,6 +626,7 @@ public class StoreFile {
* @param algorithm Pass null to get default.
* @param c Pass null to get default.
* @param conf HBase system configuration. used with bloom filters
* @param cacheConf Cache configuration and reference.
* @param bloomType column family setting for bloom filters
* @param maxKeyCount estimated maximum number of keys we expect to add
* @return HFile.Writer

@@ -690,6 +638,7 @@ public class StoreFile {
final Compression.Algorithm algorithm,
final KeyValue.KVComparator c,
final Configuration conf,
final CacheConfig cacheConf,
BloomType bloomType,
long maxKeyCount)
throws IOException {

@@ -704,7 +653,8 @@ public class StoreFile {

return new Writer(fs, path, blocksize,
algorithm == null? HFile.DEFAULT_COMPRESSION_ALGORITHM: algorithm,
conf, c == null ? KeyValue.COMPARATOR: c, bloomType, maxKeyCount);
conf, cacheConf, c == null ? KeyValue.COMPARATOR: c, bloomType,
maxKeyCount);
}

/**

@@ -826,6 +776,7 @@ public class StoreFile {
*/
public Writer(FileSystem fs, Path path, int blocksize,
Compression.Algorithm compress, final Configuration conf,
CacheConfig cacheConf,
final KVComparator comparator, BloomType bloomType, long maxKeys)
throws IOException {
writer = HFile.getWriterFactory(conf).createWriter(

@@ -834,7 +785,7 @@ public class StoreFile {

this.kvComparator = comparator;

bloomFilterWriter = BloomFilterFactory.createBloomAtWrite(conf,
bloomFilterWriter = BloomFilterFactory.createBloomAtWrite(conf, cacheConf,
bloomType, (int) Math.min(maxKeys, Integer.MAX_VALUE), writer);
if (bloomFilterWriter != null) {
this.bloomType = bloomType;

@@ -1033,10 +984,9 @@ public class StoreFile {
protected long sequenceID = -1;
private byte[] lastBloomKey;

public Reader(FileSystem fs, Path path, BlockCache blockCache,
boolean inMemory, boolean evictOnClose)
public Reader(FileSystem fs, Path path, CacheConfig cacheConf)
throws IOException {
reader = HFile.createReader(fs, path, blockCache, inMemory, evictOnClose);
reader = HFile.createReader(fs, path, cacheConf);
bloomFilterType = BloomType.NONE;
}
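On the writer side, the simplest StoreFile entry point now also threads the cache configuration through. A sketch of the new createWriter overload, with the directory and block size chosen purely for illustration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.io.hfile.CacheConfig;
    import org.apache.hadoop.hbase.regionserver.StoreFile;

    public class StoreFileWriterSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        FileSystem fs = FileSystem.get(conf);
        CacheConfig cacheConf = new CacheConfig(conf);
        Path dir = new Path("/tmp/storefile-writer-sketch");  // illustrative directory
        // Defaults (no compression, no bloom) plus the cache configuration.
        StoreFile.Writer w = StoreFile.createWriter(fs, dir, 8 * 1024, conf, cacheConf);
        w.close();
      }
    }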
@@ -27,6 +27,7 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.regionserver.StoreFile.BloomType;

@@ -75,10 +76,6 @@ public final class BloomFilterFactory {
public static final String IO_STOREFILE_BLOOM_BLOCK_SIZE =
"io.storefile.bloom.block.size";

/** Whether to cache compound Bloom filter blocks on write */
public static final String IO_STOREFILE_BLOOM_CACHE_ON_WRITE =
"io.storefile.bloom.cacheonwrite";

/** Maximum number of times a Bloom filter can be "folded" if oversized */
private static final int MAX_ALLOWED_FOLD_FACTOR = 7;

@@ -140,7 +137,8 @@ public final class BloomFilterFactory {
* or when failed to create one.
*/
public static BloomFilterWriter createBloomAtWrite(Configuration conf,
BloomType bloomType, int maxKeys, HFile.Writer writer) {
CacheConfig cacheConf, BloomType bloomType, int maxKeys,
HFile.Writer writer) {
if (!isBloomEnabled(conf)) {
LOG.info("Bloom filters are disabled by configuration for "
+ writer.getPath()

@@ -169,7 +167,7 @@ public final class BloomFilterFactory {
// In case of compound Bloom filters we ignore the maxKeys hint.
CompoundBloomFilterWriter bloomWriter = new CompoundBloomFilterWriter(
getBloomBlockSize(conf), err, Hash.getHashType(conf), maxFold,
cacheChunksOnWrite(conf), bloomType == BloomType.ROWCOL
cacheConf.shouldCacheBloomsOnWrite(), bloomType == BloomType.ROWCOL
? KeyValue.KEY_COMPARATOR : Bytes.BYTES_RAWCOMPARATOR);
writer.addInlineBlockWriter(bloomWriter);
return bloomWriter;

@@ -200,10 +198,4 @@ public final class BloomFilterFactory {
public static int getBloomBlockSize(Configuration conf) {
return conf.getInt(IO_STOREFILE_BLOOM_BLOCK_SIZE, 128 * 1024);
}

/** @return whether to cache compound Bloom filter chunks on write */
public static boolean cacheChunksOnWrite(Configuration conf) {
return conf.getBoolean(IO_STOREFILE_BLOOM_CACHE_ON_WRITE, false);
}

};
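The upshot of this file's change is that BloomFilterFactory no longer reads its own cache-on-write key; callers pass a CacheConfig and the factory asks it. A standalone sketch of that query, using the key name visible in the test hunks further below; class and values are illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.io.hfile.CacheConfig;

    public class BloomCacheOnWriteSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setBoolean(CacheConfig.CACHE_BLOOM_BLOCKS_ON_WRITE_KEY, true);
        CacheConfig cacheConf = new CacheConfig(conf);
        // Replaces the removed BloomFilterFactory.cacheChunksOnWrite(conf) check.
        System.out.println(cacheConf.shouldCacheBloomsOnWrite());
      }
    }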
@@ -19,20 +19,19 @@
*/
package org.apache.hadoop.hbase.util;

import java.io.IOException;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.Compression;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.io.compress.Compressor;

import java.io.IOException;
import java.net.URI;

/**
* Compression validation test. Checks compression is working. Be sure to run
* on every node in your cluster.

@@ -106,13 +105,14 @@ public class CompressionTest {
public static void doSmokeTest(FileSystem fs, Path path, String codec)
throws Exception {
Configuration conf = HBaseConfiguration.create();
HFile.Writer writer = HFile.getWriterFactory(conf).createWriter(
HFile.Writer writer =
HFile.getWriterFactory(conf).createWriter(
fs, path, HFile.DEFAULT_BLOCKSIZE, codec, null);
writer.append(Bytes.toBytes("testkey"), Bytes.toBytes("testval"));
writer.appendFileInfo(Bytes.toBytes("infokey"), Bytes.toBytes("infoval"));
writer.close();

HFile.Reader reader = HFile.createReader(fs, path, null, false, false);
HFile.Reader reader = HFile.createReader(fs, path, new CacheConfig(conf));
reader.loadFileInfo();
byte[] key = reader.getFirstKey();
boolean rc = Bytes.toString(key).equals("testkey");

@@ -31,9 +31,10 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.Compression;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileScanner;
import org.apache.hadoop.hbase.io.hfile.Compression;
import org.apache.hadoop.hbase.util.Bytes;

/**

@@ -188,9 +189,9 @@ public class HFilePerformanceEvaluation {

@Override
void setUp() throws Exception {
writer = HFile.getWriterFactory(conf).createWriter(this.fs, this.mf,
RFILE_BLOCKSIZE,
(Compression.Algorithm) null, null);
writer =
HFile.getWriterFactory(conf).createWriter(this.fs,
this.mf, RFILE_BLOCKSIZE, (Compression.Algorithm) null, null);
}

@Override

@@ -226,7 +227,7 @@ public class HFilePerformanceEvaluation {

@Override
void setUp() throws Exception {
reader = HFile.createReader(this.fs, this.mf, null, false, false);
reader = HFile.createReader(this.fs, this.mf, new CacheConfig(this.conf));
this.reader.loadFileInfo();
}

@@ -20,21 +20,22 @@

package org.apache.hadoop.hbase.io;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileScanner;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.Test;
import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import static org.junit.Assert.assertTrue;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileScanner;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.Test;


public class TestHalfStoreFileReader {

@@ -63,8 +64,10 @@ public class TestHalfStoreFileReader {

Configuration conf = test_util.getConfiguration();
FileSystem fs = FileSystem.get(conf);
CacheConfig cacheConf = new CacheConfig(conf);

HFile.Writer w = HFile.getWriterFactory(conf).createWriter(fs, p, 1024,
HFile.Writer w =
HFile.getWriterFactory(conf, cacheConf).createWriter(fs, p, 1024,
"none", KeyValue.KEY_COMPARATOR);

// write some things.

@@ -74,7 +77,7 @@ public class TestHalfStoreFileReader {
}
w.close();

HFile.Reader r = HFile.createReader(fs, p, null, false, false);
HFile.Reader r = HFile.createReader(fs, p, cacheConf);
r.loadFileInfo();
byte [] midkey = r.midkey();
KeyValue midKV = KeyValue.createKeyValueFromKey(midkey);

@@ -83,16 +86,17 @@ public class TestHalfStoreFileReader {
//System.out.println("midkey: " + midKV + " or: " + Bytes.toStringBinary(midkey));

Reference bottom = new Reference(midkey, Reference.Range.bottom);
doTestOfScanAndReseek(p, fs, bottom);
doTestOfScanAndReseek(p, fs, bottom, cacheConf);

Reference top = new Reference(midkey, Reference.Range.top);
doTestOfScanAndReseek(p, fs, top);
doTestOfScanAndReseek(p, fs, top, cacheConf);
}

private void doTestOfScanAndReseek(Path p, FileSystem fs, Reference bottom)
private void doTestOfScanAndReseek(Path p, FileSystem fs, Reference bottom,
CacheConfig cacheConf)
throws IOException {
final HalfStoreFileReader halfreader =
new HalfStoreFileReader(fs, p, null, bottom);
new HalfStoreFileReader(fs, p, cacheConf, bottom);
halfreader.loadFileInfo();
final HFileScanner scanner = halfreader.getScanner(false, false);

@@ -67,7 +67,10 @@ public class RandomSeek {
Path path = new Path("/Users/ryan/rfile.big.txt");
long start = System.currentTimeMillis();
SimpleBlockCache cache = new SimpleBlockCache();
Reader reader = HFile.createReader(lfs, path, cache, false, false);
CacheConfig cacheConf = new CacheConfig(cache, true, false, false, false,
false, false, false);

Reader reader = HFile.createReader(lfs, path, cacheConf);
reader.loadFileInfo();
System.out.println(reader.getTrailer());
long end = System.currentTimeMillis();

@@ -64,6 +64,7 @@ public class TestCacheOnWrite {
private static final HBaseTestingUtility TEST_UTIL =
new HBaseTestingUtility();
private Configuration conf;
private CacheConfig cacheConf;
private FileSystem fs;
private Random rand = new Random(12983177L);
private Path storeFilePath;

@@ -82,11 +83,11 @@ public class TestCacheOnWrite {
KeyValue.Type.values().length - 2;

private static enum CacheOnWriteType {
DATA_BLOCKS(BlockType.DATA, HFile.CACHE_BLOCKS_ON_WRITE_KEY),
DATA_BLOCKS(BlockType.DATA, CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY),
BLOOM_BLOCKS(BlockType.BLOOM_CHUNK,
BloomFilterFactory.IO_STOREFILE_BLOOM_CACHE_ON_WRITE),
CacheConfig.CACHE_BLOOM_BLOCKS_ON_WRITE_KEY),
INDEX_BLOCKS(BlockType.LEAF_INDEX,
HFileBlockIndex.CACHE_INDEX_BLOCKS_ON_WRITE_KEY);
CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY);

private final String confKey;
private final BlockType inlineBlockType;

@@ -114,6 +115,7 @@ public class TestCacheOnWrite {
this.cowType = cowType;
this.compress = compress;
testName = "[cacheOnWrite=" + cowType + ", compress=" + compress + "]";
System.out.println(testName);
}

@Parameters

@@ -134,9 +136,17 @@ public class TestCacheOnWrite {
conf.setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY, INDEX_BLOCK_SIZE);
conf.setInt(BloomFilterFactory.IO_STOREFILE_BLOOM_BLOCK_SIZE,
BLOOM_BLOCK_SIZE);
conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY,
cowType.shouldBeCached(BlockType.DATA));
conf.setBoolean(CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY,
cowType.shouldBeCached(BlockType.LEAF_INDEX));
conf.setBoolean(CacheConfig.CACHE_BLOOM_BLOCKS_ON_WRITE_KEY,
cowType.shouldBeCached(BlockType.BLOOM_CHUNK));
cowType.modifyConf(conf);
fs = FileSystem.get(conf);
blockCache = StoreFile.getBlockCache(conf);
cacheConf = new CacheConfig(conf);
blockCache = cacheConf.getBlockCache();
System.out.println("setUp()");
}

@After

@@ -152,7 +162,7 @@ public class TestCacheOnWrite {

private void readStoreFile() throws IOException {
HFileReaderV2 reader = (HFileReaderV2) HFile.createReader(fs,
storeFilePath, null, false, false);
storeFilePath, cacheConf);
LOG.info("HFile information: " + reader);
HFileScanner scanner = reader.getScanner(false, false);
assertTrue(testName, scanner.seekTo());

@@ -167,8 +177,8 @@ public class TestCacheOnWrite {
if (prevBlock != null) {
onDiskSize = prevBlock.getNextBlockOnDiskSizeWithHeader();
}
// Flags: cache the block, use pread, this is not a compaction.
HFileBlock block = reader.readBlock(offset, onDiskSize, true, true,
// Flags: don't cache the block, use pread, this is not a compaction.
HFileBlock block = reader.readBlock(offset, onDiskSize, false, true,
false);
String blockCacheKey = HFile.getBlockCacheKey(reader.getName(), offset);
boolean isCached = blockCache.getBlock(blockCacheKey, true) != null;

@@ -210,7 +220,7 @@ public class TestCacheOnWrite {
"test_cache_on_write");
StoreFile.Writer sfw = StoreFile.createWriter(fs, storeFileParentDir,
DATA_BLOCK_SIZE, compress, KeyValue.COMPARATOR, conf,
StoreFile.BloomType.ROWCOL, NUM_KV);
cacheConf, StoreFile.BloomType.ROWCOL, NUM_KV);

final int rowLen = 32;
for (int i = 0; i < NUM_KV; ++i) {
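The TestCacheOnWrite changes above also show where the cache-on-write switches now live: the per-block-type keys move from HFile, HFileBlockIndex and BloomFilterFactory onto CacheConfig. A sketch of enabling all three before building a CacheConfig; the key names are taken from the hunks above, everything else is illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.io.hfile.CacheConfig;

    public class CacheOnWriteConfSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // All three cache-on-write toggles are now CacheConfig constants.
        conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, true);
        conf.setBoolean(CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY, true);
        conf.setBoolean(CacheConfig.CACHE_BLOOM_BLOCKS_ON_WRITE_KEY, true);
        CacheConfig cacheConf = new CacheConfig(conf);
        System.out.println(cacheConf);  // prints the resulting cache settings
      }
    }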
@@ -53,6 +53,7 @@ public class TestHFile extends HBaseTestCase {
HBaseTestingUtility.getTestDir("TestHFile").toString();
private final int minBlockSize = 512;
private static String localFormatter = "%010d";
private static CacheConfig cacheConf = null;

/**
* Test empty HFile.

@@ -60,10 +61,11 @@ public class TestHFile extends HBaseTestCase {
* @throws IOException
*/
public void testEmptyHFile() throws IOException {
if (cacheConf == null) cacheConf = new CacheConfig(conf);
Path f = new Path(ROOT_DIR, getName());
Writer w = HFile.getWriterFactory(conf).createWriter(this.fs, f);
Writer w = HFile.getWriterFactory(conf, cacheConf).createWriter(this.fs, f);
w.close();
Reader r = HFile.createReader(fs, f, null, false, false);
Reader r = HFile.createReader(fs, f, cacheConf);
r.loadFileInfo();
assertNull(r.getFirstKey());
assertNull(r.getLastKey());

@@ -130,16 +132,18 @@ public class TestHFile extends HBaseTestCase {
* test none codecs
*/
void basicWithSomeCodec(String codec) throws IOException {
Path ncTFile = new Path(ROOT_DIR, "basic.hfile");
if (cacheConf == null) cacheConf = new CacheConfig(conf);
Path ncTFile = new Path(ROOT_DIR, "basic.hfile." + codec.toString());
FSDataOutputStream fout = createFSOutput(ncTFile);
Writer writer = HFile.getWriterFactory(conf).createWriter(fout,
Writer writer = HFile.getWriterFactory(conf, cacheConf).createWriter(fout,
minBlockSize, Compression.getCompressionAlgorithmByName(codec), null);
LOG.info(writer);
writeRecords(writer);
fout.close();
FSDataInputStream fin = fs.open(ncTFile);
Reader reader = HFile.createReader(ncTFile, fs.open(ncTFile),
fs.getFileStatus(ncTFile).getLen(), null, false, false);
fs.getFileStatus(ncTFile).getLen(), cacheConf);
System.out.println(cacheConf.toString());
// Load up the index.
reader.loadFileInfo();
// Get a scanner that caches and that does not use pread.

@@ -205,9 +209,10 @@ public class TestHFile extends HBaseTestCase {
}

private void metablocks(final String compress) throws Exception {
if (cacheConf == null) cacheConf = new CacheConfig(conf);
Path mFile = new Path(ROOT_DIR, "meta.hfile");
FSDataOutputStream fout = createFSOutput(mFile);
Writer writer = HFile.getWriterFactory(conf).createWriter(fout,
Writer writer = HFile.getWriterFactory(conf, cacheConf).createWriter(fout,
minBlockSize, Compression.getCompressionAlgorithmByName(compress),
null);
someTestingWithMetaBlock(writer);

@@ -215,7 +220,7 @@ public class TestHFile extends HBaseTestCase {
fout.close();
FSDataInputStream fin = fs.open(mFile);
Reader reader = HFile.createReader(mFile, fs.open(mFile),
this.fs.getFileStatus(mFile).getLen(), null, false, false);
this.fs.getFileStatus(mFile).getLen(), cacheConf);
reader.loadFileInfo();
// No data -- this should return false.
assertFalse(reader.getScanner(false, false).seekTo());

@@ -232,16 +237,17 @@ public class TestHFile extends HBaseTestCase {
}

public void testNullMetaBlocks() throws Exception {
if (cacheConf == null) cacheConf = new CacheConfig(conf);
for (Compression.Algorithm compressAlgo :
HBaseTestingUtility.COMPRESSION_ALGORITHMS) {
Path mFile = new Path(ROOT_DIR, "nometa_" + compressAlgo + ".hfile");
FSDataOutputStream fout = createFSOutput(mFile);
Writer writer = HFile.getWriterFactory(conf).createWriter(fout,
Writer writer = HFile.getWriterFactory(conf, cacheConf).createWriter(fout,
minBlockSize, compressAlgo, null);
writer.append("foo".getBytes(), "value".getBytes());
writer.close();
fout.close();
Reader reader = HFile.createReader(fs, mFile, null, false, false);
Reader reader = HFile.createReader(fs, mFile, cacheConf);
reader.loadFileInfo();
assertNull(reader.getMetaBlock("non-existant", false));
}

@@ -257,9 +263,10 @@ public class TestHFile extends HBaseTestCase {
}

public void testComparator() throws IOException {
if (cacheConf == null) cacheConf = new CacheConfig(conf);
Path mFile = new Path(ROOT_DIR, "meta.tfile");
FSDataOutputStream fout = createFSOutput(mFile);
Writer writer = HFile.getWriterFactory(conf).createWriter(fout,
Writer writer = HFile.getWriterFactory(conf, cacheConf).createWriter(fout,
minBlockSize, (Compression.Algorithm) null, new KeyComparator() {
@Override
public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2,

@@ -460,7 +460,8 @@ public class TestHFileBlockIndex {
public void testHFileWriterAndReader() throws IOException {
Path hfilePath = new Path(HBaseTestingUtility.getTestDir(),
"hfile_for_block_index");
BlockCache blockCache = StoreFile.getBlockCache(conf);
CacheConfig cacheConf = new CacheConfig(conf);
BlockCache blockCache = cacheConf.getBlockCache();

for (int testI = 0; testI < INDEX_CHUNK_SIZES.length; ++testI) {
int indexBlockSize = INDEX_CHUNK_SIZES[testI];

@@ -478,7 +479,8 @@ public class TestHFileBlockIndex {

// Write the HFile
{
HFile.Writer writer = HFile.getWriterFactory(conf).createWriter(fs,
HFile.Writer writer =
HFile.getWriterFactory(conf, cacheConf).createWriter(fs,
hfilePath, SMALL_BLOCK_SIZE, compr, KeyValue.KEY_COMPARATOR);
Random rand = new Random(19231737);

@@ -505,8 +507,7 @@ public class TestHFileBlockIndex {
}

// Read the HFile
HFile.Reader reader = HFile.createReader(fs, hfilePath, blockCache,
false, true);
HFile.Reader reader = HFile.createReader(fs, hfilePath, cacheConf);
assertEquals(expectedNumLevels,
reader.getTrailer().getNumDataIndexLevels());

@@ -157,7 +157,8 @@ public class TestHFilePerformance extends TestCase {

if ("HFile".equals(fileType)){
System.out.println("HFile write method: ");
HFile.Writer writer = HFile.getWriterFactory(conf).createWriter(fout,
HFile.Writer writer =
HFile.getWriterFactory(conf).createWriter(fout,
minBlockSize, codecName, null);

// Writing value in one shot.

@@ -237,7 +238,7 @@ public class TestHFilePerformance extends TestCase {

if ("HFile".equals(fileType)){
HFile.Reader reader = HFile.createReader(path, fs.open(path),
fs.getFileStatus(path).getLen(), null, false, false);
fs.getFileStatus(path).getLen(), new CacheConfig(conf));
reader.loadFileInfo();
switch (method) {

@@ -56,7 +56,7 @@ public class TestHFileReaderV1 {
"8e8ab58dcf39412da19833fcd8f687ac");
Path existingHFilePath = new Path(url.getPath());
HFile.Reader reader =
HFile.createReader(fs, existingHFilePath, null, false, false);
HFile.createReader(fs, existingHFilePath, new CacheConfig(conf));
reader.loadFileInfo();
FixedFileTrailer trailer = reader.getTrailer();

@@ -118,7 +118,8 @@ public class TestHFileSeek extends TestCase {
long totalBytes = 0;
FSDataOutputStream fout = createFSOutput(path, fs);
try {
Writer writer = HFile.getWriterFactory(conf).createWriter(fout,
Writer writer =
HFile.getWriterFactory(conf).createWriter(fout,
options.minBlockSize, options.compress, null);
try {
BytesWritable key = new BytesWritable();

@@ -164,7 +165,7 @@ public class TestHFileSeek extends TestCase {
long totalBytes = 0;
FSDataInputStream fsdis = fs.open(path);
Reader reader = HFile.createReader(path, fsdis,
fs.getFileStatus(path).getLen(), null, false, false);
fs.getFileStatus(path).getLen(), new CacheConfig(conf));
reader.loadFileInfo();
KeySampler kSampler =
new KeySampler(rng, reader.getFirstKey(), reader.getLastKey(),

@@ -67,8 +67,8 @@ public class TestHFileWriterV2 {
"testHFileFormatV2");

final Compression.Algorithm COMPRESS_ALGO = Compression.Algorithm.GZ;
HFileWriterV2 writer = new HFileWriterV2(conf, fs, hfilePath, 4096,
COMPRESS_ALGO, KeyValue.KEY_COMPARATOR);
HFileWriterV2 writer = new HFileWriterV2(conf, new CacheConfig(conf), fs,
hfilePath, 4096, COMPRESS_ALGO, KeyValue.KEY_COMPARATOR);

long totalKeyLength = 0;
long totalValueLength = 0;

@@ -42,8 +42,10 @@ public class TestReseekTo {

Path ncTFile = new Path(HBaseTestingUtility.getTestDir(), "basic.hfile");
FSDataOutputStream fout = TEST_UTIL.getTestFileSystem().create(ncTFile);
CacheConfig cacheConf = new CacheConfig(TEST_UTIL.getConfiguration());
HFile.Writer writer = HFile.getWriterFactory(
TEST_UTIL.getConfiguration()).createWriter(fout, 4000, "none", null);
TEST_UTIL.getConfiguration(), cacheConf).createWriter(
fout, 4000, "none", null);
int numberOfKeys = 1000;

String valueString = "Value";

@@ -61,7 +63,7 @@ public class TestReseekTo {
fout.close();

HFile.Reader reader = HFile.createReader(TEST_UTIL.getTestFileSystem(),
ncTFile, null, false, false);
ncTFile, cacheConf);
reader.loadFileInfo();
HFileScanner scanner = reader.getScanner(false, true);

@@ -45,7 +45,8 @@ public class TestSeekTo extends HBaseTestCase {
Path ncTFile = new Path(this.testDir, "basic.hfile");
FSDataOutputStream fout = this.fs.create(ncTFile);
int blocksize = toKV("a").getLength() * 3;
HFile.Writer writer = HFile.getWriterFactory(conf).createWriter(fout,
HFile.Writer writer =
HFile.getWriterFactory(conf).createWriter(fout,
blocksize, "none", null);
// 4 bytes * 3 * 2 for each key/value +
// 3 for keys, 15 for values = 42 (woot)

@@ -62,7 +63,7 @@ public class TestSeekTo extends HBaseTestCase {

public void testSeekBefore() throws Exception {
Path p = makeNewFile();
HFile.Reader reader = HFile.createReader(fs, p, null, false, false);
HFile.Reader reader = HFile.createReader(fs, p, new CacheConfig(conf));
reader.loadFileInfo();
HFileScanner scanner = reader.getScanner(false, true);
assertEquals(false, scanner.seekBefore(toKV("a").getKey()));

@@ -95,7 +96,7 @@ public class TestSeekTo extends HBaseTestCase {

public void testSeekTo() throws Exception {
Path p = makeNewFile();
HFile.Reader reader = HFile.createReader(fs, p, null, false, false);
HFile.Reader reader = HFile.createReader(fs, p, new CacheConfig(conf));
reader.loadFileInfo();
assertEquals(2, reader.getDataBlockIndexReader().getRootBlockCount());
HFileScanner scanner = reader.getScanner(false, true);

@@ -115,7 +116,7 @@ public class TestSeekTo extends HBaseTestCase {

public void testBlockContainingKey() throws Exception {
Path p = makeNewFile();
HFile.Reader reader = HFile.createReader(fs, p, null, false, false);
HFile.Reader reader = HFile.createReader(fs, p, new CacheConfig(conf));
reader.loadFileInfo();
HFileBlockIndex.BlockIndexReader blockIndexReader =
reader.getDataBlockIndexReader();

@@ -54,6 +54,7 @@ import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.Compression;
import org.apache.hadoop.hbase.io.hfile.Compression.Algorithm;
import org.apache.hadoop.hbase.io.hfile.HFile;

@@ -281,8 +282,8 @@ public class TestHFileOutputFormat {
FileStatus[] file = fs.listStatus(sub3[0].getPath());

// open as HFile Reader and pull out TIMERANGE FileInfo.
HFile.Reader rd = HFile.createReader(fs, file[0].getPath(), null, true,
false);
HFile.Reader rd = HFile.createReader(fs, file[0].getPath(),
new CacheConfig(conf));
Map<byte[],byte[]> finfo = rd.loadFileInfo();
byte[] range = finfo.get("TIMERANGE".getBytes());
assertNotNull(range);

@@ -608,8 +609,8 @@ public class TestHFileOutputFormat {
// verify that the compression on this file matches the configured
// compression
Path dataFilePath = fileSystem.listStatus(f.getPath())[0].getPath();
Reader reader = HFile.createReader(fileSystem, dataFilePath, null,
false, true);
Reader reader = HFile.createReader(fileSystem, dataFilePath,
new CacheConfig(conf));
reader.loadFileInfo();
assertEquals("Incorrect compression used for column family " + familyStr
+ "(reader: " + reader + ")",

@@ -19,10 +19,11 @@
*/
package org.apache.hadoop.hbase.mapreduce;

import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.util.Arrays;
import java.util.TreeMap;

import org.apache.hadoop.conf.Configuration;

@@ -34,6 +35,7 @@ import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.Compression;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileScanner;

@@ -41,8 +43,6 @@ import org.apache.hadoop.hbase.regionserver.StoreFile.BloomType;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.Test;

import static org.junit.Assert.*;

/**
* Test cases for the "load" half of the HFileOutputFormat bulk load
* functionality. These tests run faster than the full MR cluster

@@ -179,7 +179,7 @@ public class TestLoadIncrementalHFiles {
private int verifyHFile(Path p) throws IOException {
Configuration conf = util.getConfiguration();
HFile.Reader reader = HFile.createReader(
p.getFileSystem(conf), p, null, false, false);
p.getFileSystem(conf), p, new CacheConfig(conf));
reader.loadFileInfo();
HFileScanner scanner = reader.getScanner(false, false);
scanner.seekTo();

@@ -203,7 +203,8 @@ public class TestLoadIncrementalHFiles {
byte[] family, byte[] qualifier,
byte[] startKey, byte[] endKey, int numRows) throws IOException
{
HFile.Writer writer = HFile.getWriterFactory(conf).createWriter(fs, path,
HFile.Writer writer =
HFile.getWriterFactory(conf, new CacheConfig(conf)).createWriter(fs, path,
BLOCKSIZE, COMPRESSION,
KeyValue.KEY_COMPARATOR);
long now = System.currentTimeMillis();

@@ -21,6 +21,7 @@
package org.apache.hadoop.hbase.regionserver;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

@@ -31,18 +32,18 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestCase;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.CacheStats;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.LruBlockCache;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper;

import org.junit.Test;

public class TestBlocksRead extends HBaseTestCase {

@@ -98,7 +99,7 @@ public class TestBlocksRead extends HBaseTestCase {
HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
Path path = new Path(DIR + callingMethod);
region = HRegion.createHRegion(info, path, conf, htd);
blockCache = StoreFile.getBlockCache(conf);
blockCache = new CacheConfig(conf).getBlockCache();
}

private void putData(byte[] cf, String row, String col, long version)

@@ -178,6 +179,10 @@ public class TestBlocksRead extends HBaseTestCase {
return blockCache.getStats().getRequestCount();
}

private static long getBlkCount() {
return blockCache.getBlockCount();
}

/**
* Test # of blocks read for some simple seek cases.
* @throws Exception

@@ -316,4 +321,49 @@ public class TestBlocksRead extends HBaseTestCase {
verifyData(kvs[1], "row", "col2", 12);
verifyData(kvs[2], "row", "col3", 13);
}

/**
* Test # of blocks read to ensure disabling cache-fill on Scan works.
* @throws Exception
*/
@Test
public void testBlocksStoredWhenCachingDisabled() throws Exception {
byte [] TABLE = Bytes.toBytes("testBlocksReadWhenCachingDisabled");
byte [] FAMILY = Bytes.toBytes("cf1");
byte [][] FAMILIES = new byte[][] { FAMILY };

HBaseConfiguration conf = getConf();
initHRegion(TABLE, getName(), conf, FAMILIES);

putData(FAMILY, "row", "col1", 1);
putData(FAMILY, "row", "col2", 2);
region.flushcache();

// Execute a scan with caching turned off
// Expected blocks stored: 0
long blocksStart = getBlkCount();
Scan scan = new Scan();
scan.setCacheBlocks(false);
RegionScanner rs = region.getScanner(scan);
List<KeyValue> result = new ArrayList<KeyValue>(2);
rs.next(result);
assertEquals(2, result.size());
rs.close();
long blocksEnd = getBlkCount();

assertEquals(blocksStart, blocksEnd);

// Execute with caching turned on
// Expected blocks stored: 2
blocksStart = blocksEnd;
scan.setCacheBlocks(true);
rs = region.getScanner(scan);
result = new ArrayList<KeyValue>(2);
rs.next(result);
assertEquals(2, result.size());
rs.close();
blocksEnd = getBlkCount();

assertEquals(2, blocksEnd - blocksStart);
}
}
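The new TestBlocksRead case exercises the client-side switch that keeps scanned blocks out of the block cache. A tiny sketch of that switch on its own, outside any region; the class name is illustrative:

    import org.apache.hadoop.hbase.client.Scan;

    public class ScanCacheBlocksSketch {
      public static void main(String[] args) {
        Scan scan = new Scan();
        scan.setCacheBlocks(false);  // blocks read by this scan are not retained in the cache
        System.out.println(scan.getCacheBlocks());
      }
    }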
@ -36,9 +36,8 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
|
|||
import org.apache.hadoop.hbase.HConstants;
|
||||
import org.apache.hadoop.hbase.HRegionInfo;
|
||||
import org.apache.hadoop.hbase.HTableDescriptor;
|
||||
import org.apache.hadoop.hbase.regionserver.StoreFile.Reader;
|
||||
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
|
||||
import org.apache.hadoop.hbase.regionserver.wal.HLog;
|
||||
import org.apache.hadoop.hbase.regionserver.wal.TestWALReplay;
|
||||
import org.apache.hadoop.hbase.util.Bytes;
|
||||
|
||||
import com.google.common.collect.Lists;
|
||||
|
@ -100,8 +99,9 @@ public class TestCompactSelection extends TestCase {
|
|||
boolean isRef = false;
|
||||
|
||||
MockStoreFile(long length, boolean isRef) throws IOException {
|
||||
super(TEST_UTIL.getTestFileSystem(), TEST_FILE, false,
|
||||
TEST_UTIL.getConfiguration(), BloomType.NONE, false);
|
||||
super(TEST_UTIL.getTestFileSystem(), TEST_FILE,
|
||||
TEST_UTIL.getConfiguration(),
|
||||
new CacheConfig(TEST_UTIL.getConfiguration()), BloomType.NONE);
|
||||
this.length = length;
|
||||
this.isRef = isRef;
|
||||
}
|
||||
|
|
|
@ -20,7 +20,10 @@
|
|||
|
||||
package org.apache.hadoop.hbase.regionserver;
|
||||
|
||||
import static org.junit.Assert.*;
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertNotNull;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
import static org.junit.Assert.fail;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
|
@ -38,6 +41,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
|
|||
import org.apache.hadoop.hbase.KeyValue;
|
||||
import org.apache.hadoop.hbase.client.Scan;
|
||||
import org.apache.hadoop.hbase.io.hfile.BlockCache;
|
||||
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
|
||||
import org.apache.hadoop.hbase.io.hfile.HFile;
|
||||
import org.apache.hadoop.hbase.io.hfile.TestHFileWriterV2;
|
||||
import org.apache.hadoop.hbase.regionserver.StoreFile.BloomType;
|
||||
|
@ -105,6 +109,7 @@ public class TestCompoundBloomFilter {
|
|||
}
|
||||
|
||||
private static Configuration conf;
|
||||
private static CacheConfig cacheConf;
|
||||
private FileSystem fs;
|
||||
private BlockCache blockCache;
|
||||
|
||||
|
@ -123,7 +128,8 @@ public class TestCompoundBloomFilter {
|
|||
|
||||
fs = FileSystem.get(conf);
|
||||
|
||||
blockCache = StoreFile.getBlockCache(conf);
|
||||
cacheConf = new CacheConfig(conf);
|
||||
blockCache = cacheConf.getBlockCache();
|
||||
assertNotNull(blockCache);
|
||||
}
|
||||
|
||||
|
@ -187,7 +193,7 @@ public class TestCompoundBloomFilter {
|
|||
|
||||
private void readStoreFile(int t, BloomType bt, List<KeyValue> kvs,
|
||||
Path sfPath) throws IOException {
|
||||
StoreFile sf = new StoreFile(fs, sfPath, true, conf, bt, false);
|
||||
StoreFile sf = new StoreFile(fs, sfPath, conf, cacheConf, bt);
|
||||
StoreFile.Reader r = sf.createReader();
|
||||
final boolean pread = true; // does not really matter
|
||||
StoreFileScanner scanner = r.getStoreFileScanner(true, pread);
|
||||
|
@ -283,11 +289,12 @@ public class TestCompoundBloomFilter {
|
|||
throws IOException {
|
||||
conf.setInt(BloomFilterFactory.IO_STOREFILE_BLOOM_BLOCK_SIZE,
|
||||
BLOOM_BLOCK_SIZES[t]);
|
||||
conf.setBoolean(HFile.CACHE_BLOCKS_ON_WRITE_KEY, true);
|
||||
conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, true);
|
||||
cacheConf = new CacheConfig(conf);
|
||||
|
||||
StoreFile.Writer w = StoreFile.createWriter(fs,
|
||||
HBaseTestingUtility.getTestDir(), BLOCK_SIZES[t], null, null, conf,
|
||||
bt, 0);
|
||||
cacheConf, bt, 0);
|
||||
|
||||
assertTrue(w.hasBloom());
|
||||
assertTrue(w.getBloomWriter() instanceof CompoundBloomFilterWriter);
|
||||
|
|
|
@@ -45,6 +45,7 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
 import org.apache.hadoop.hbase.regionserver.StoreFile.BloomType;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -70,13 +71,14 @@ public class TestFSErrorsExposed {
         HBaseTestingUtility.getTestDir("internalScannerExposesErrors"),
         "regionname"), "familyname");
     FaultyFileSystem fs = new FaultyFileSystem(util.getTestFileSystem());
+    CacheConfig cacheConf = new CacheConfig(util.getConfiguration());
     StoreFile.Writer writer = StoreFile.createWriter(fs, hfilePath, 2*1024,
-        util.getConfiguration());
+        util.getConfiguration(), cacheConf);
     TestStoreFile.writeStoreFile(
         writer, Bytes.toBytes("cf"), Bytes.toBytes("qual"));
 
-    StoreFile sf = new StoreFile(fs, writer.getPath(), false,
-        util.getConfiguration(), StoreFile.BloomType.NONE, false);
+    StoreFile sf = new StoreFile(fs, writer.getPath(),
+        util.getConfiguration(), cacheConf, StoreFile.BloomType.NONE);
     StoreFile.Reader reader = sf.createReader();
     HFileScanner scanner = reader.getScanner(false, true);
 
@@ -112,13 +114,14 @@ public class TestFSErrorsExposed {
         HBaseTestingUtility.getTestDir("internalScannerExposesErrors"),
         "regionname"), "familyname");
     FaultyFileSystem fs = new FaultyFileSystem(util.getTestFileSystem());
+    CacheConfig cacheConf = new CacheConfig(util.getConfiguration());
     StoreFile.Writer writer = StoreFile.createWriter(fs, hfilePath, 2 * 1024,
-        util.getConfiguration());
+        util.getConfiguration(), cacheConf);
     TestStoreFile.writeStoreFile(
         writer, Bytes.toBytes("cf"), Bytes.toBytes("qual"));
 
-    StoreFile sf = new StoreFile(fs, writer.getPath(), false,
-        util.getConfiguration(), BloomType.NONE, false);
+    StoreFile sf = new StoreFile(fs, writer.getPath(), util.getConfiguration(),
+        cacheConf, BloomType.NONE);
     List<StoreFileScanner> scanners = StoreFileScanner.getScannersForStoreFiles(
         Collections.singletonList(sf), false, true);
     KeyValueScanner scanner = scanners.get(0);

@@ -51,6 +51,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.regionserver.wal.HLog;
 import org.apache.hadoop.hbase.security.User;
@@ -209,7 +210,7 @@ public class TestStore extends TestCase {
     Configuration c = HBaseConfiguration.create();
     FileSystem fs = FileSystem.get(c);
     StoreFile.Writer w = StoreFile.createWriter(fs, storedir,
-        StoreFile.DEFAULT_BLOCKSIZE_SMALL, c);
+        StoreFile.DEFAULT_BLOCKSIZE_SMALL, c, new CacheConfig(c));
     w.appendMetadata(seqid + 1, false);
     w.close();
     this.store.close();

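A matching aside for the write path (again not part of the patch): the TestStore hunk above shows the only signature change writers need. The fragment below assumes the same conf and fs as the read sketch earlier, plus a caller-supplied storedir Path and seqid long; the argument order is copied from the hunk itself.

    // StoreFile.createWriter now takes a CacheConfig alongside the Configuration.
    CacheConfig cacheConf = new CacheConfig(conf);
    StoreFile.Writer w = StoreFile.createWriter(fs, storedir,
        StoreFile.DEFAULT_BLOCKSIZE_SMALL, conf, cacheConf);
    w.appendMetadata(seqid + 1, false);
    w.close();
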
@@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.Reference.Range;
 import org.apache.hadoop.hbase.io.hfile.BlockCache;
+import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.CacheStats;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
@@ -58,6 +59,7 @@ import com.google.common.collect.Lists;
 public class TestStoreFile extends HBaseTestCase {
   static final Log LOG = LogFactory.getLog(TestStoreFile.class);
   private MiniDFSCluster cluster;
+  private CacheConfig cacheConf;
 
   @Override
   public void setUp() throws Exception {
@@ -66,6 +68,7 @@ public class TestStoreFile extends HBaseTestCase {
       // Set the hbase.rootdir to be the home directory in mini dfs.
       this.conf.set(HConstants.HBASE_DIR,
         this.cluster.getFileSystem().getHomeDirectory().toString());
+      this.cacheConf = new CacheConfig(conf);
     } catch (IOException e) {
       shutdownDfs(cluster);
     }
@@ -89,10 +92,10 @@ public class TestStoreFile extends HBaseTestCase {
     // Make up a directory hierarchy that has a regiondir and familyname.
     StoreFile.Writer writer = StoreFile.createWriter(this.fs,
       new Path(new Path(this.testDir, "regionname"), "familyname"), 2 * 1024,
-      conf);
+      conf, cacheConf);
     writeStoreFile(writer);
-    checkHalfHFile(new StoreFile(this.fs, writer.getPath(), true, conf,
-        StoreFile.BloomType.NONE, false));
+    checkHalfHFile(new StoreFile(this.fs, writer.getPath(), conf, cacheConf,
+        StoreFile.BloomType.NONE));
   }
 
   private void writeStoreFile(final StoreFile.Writer writer) throws IOException {
@@ -130,10 +133,10 @@ public class TestStoreFile extends HBaseTestCase {
     Path dir = new Path(storedir, "1234567890");
     // Make a store file and write data to it.
     StoreFile.Writer writer = StoreFile.createWriter(this.fs, dir, 8 * 1024,
-        conf);
+        conf, cacheConf);
     writeStoreFile(writer);
-    StoreFile hsf = new StoreFile(this.fs, writer.getPath(), true, conf,
-        StoreFile.BloomType.NONE, false);
+    StoreFile hsf = new StoreFile(this.fs, writer.getPath(), conf, cacheConf,
+        StoreFile.BloomType.NONE);
     StoreFile.Reader reader = hsf.createReader();
     // Split on a row, not in middle of row. Midkey returned by reader
     // may be in middle of row. Create new one with empty column and
@@ -144,8 +147,8 @@ public class TestStoreFile extends HBaseTestCase {
     byte [] finalRow = kv.getRow();
     // Make a reference
     Path refPath = StoreFile.split(fs, dir, hsf, midRow, Range.top);
-    StoreFile refHsf = new StoreFile(this.fs, refPath, true, conf,
-        StoreFile.BloomType.NONE, false);
+    StoreFile refHsf = new StoreFile(this.fs, refPath, conf, cacheConf,
+        StoreFile.BloomType.NONE);
     // Now confirm that I can read from the reference and that it only gets
     // keys from top half of the file.
     HFileScanner s = refHsf.createReader().getScanner(false, false);
@@ -181,10 +184,10 @@ public class TestStoreFile extends HBaseTestCase {
     Path bottomPath = StoreFile.split(this.fs, bottomDir,
       f, midRow, Range.bottom);
     // Make readers on top and bottom.
-    StoreFile.Reader top = new StoreFile(this.fs, topPath, true, conf,
-        StoreFile.BloomType.NONE, false).createReader();
-    StoreFile.Reader bottom = new StoreFile(this.fs, bottomPath, true, conf,
-        StoreFile.BloomType.NONE, false).createReader();
+    StoreFile.Reader top = new StoreFile(this.fs, topPath, conf, cacheConf,
+        StoreFile.BloomType.NONE).createReader();
+    StoreFile.Reader bottom = new StoreFile(this.fs, bottomPath, conf, cacheConf,
+        StoreFile.BloomType.NONE).createReader();
     ByteBuffer previous = null;
     LOG.info("Midkey: " + midKV.toString());
     ByteBuffer bbMidkeyBytes = ByteBuffer.wrap(midkey);
@@ -239,10 +242,10 @@ public class TestStoreFile extends HBaseTestCase {
       topPath = StoreFile.split(this.fs, topDir, f, badmidkey, Range.top);
       bottomPath = StoreFile.split(this.fs, bottomDir, f, badmidkey,
         Range.bottom);
-      top = new StoreFile(this.fs, topPath, true, conf,
-          StoreFile.BloomType.NONE, false).createReader();
-      bottom = new StoreFile(this.fs, bottomPath, true, conf,
-          StoreFile.BloomType.NONE, false).createReader();
+      top = new StoreFile(this.fs, topPath, conf, cacheConf,
+          StoreFile.BloomType.NONE).createReader();
+      bottom = new StoreFile(this.fs, bottomPath, conf, cacheConf,
+          StoreFile.BloomType.NONE).createReader();
       bottomScanner = bottom.getScanner(false, false);
       int count = 0;
       while ((!bottomScanner.isSeeked() && bottomScanner.seekTo()) ||
@@ -284,10 +287,10 @@ public class TestStoreFile extends HBaseTestCase {
       topPath = StoreFile.split(this.fs, topDir, f, badmidkey, Range.top);
       bottomPath = StoreFile.split(this.fs, bottomDir, f, badmidkey,
         Range.bottom);
-      top = new StoreFile(this.fs, topPath, true, conf,
-          StoreFile.BloomType.NONE, false).createReader();
-      bottom = new StoreFile(this.fs, bottomPath, true, conf,
-          StoreFile.BloomType.NONE, false).createReader();
+      top = new StoreFile(this.fs, topPath, conf, cacheConf,
+          StoreFile.BloomType.NONE).createReader();
+      bottom = new StoreFile(this.fs, bottomPath, conf, cacheConf,
+          StoreFile.BloomType.NONE).createReader();
       first = true;
       bottomScanner = bottom.getScanner(false, false);
       while ((!bottomScanner.isSeeked() && bottomScanner.seekTo()) ||
@@ -345,7 +348,7 @@ public class TestStoreFile extends HBaseTestCase {
     }
     writer.close();
 
-    StoreFile.Reader reader = new StoreFile.Reader(fs, f, null, false, false);
+    StoreFile.Reader reader = new StoreFile.Reader(fs, f, cacheConf);
     reader.loadFileInfo();
     reader.loadBloomfilter();
     StoreFileScanner scanner = reader.getStoreFileScanner(false, false);
@@ -386,7 +389,7 @@ public class TestStoreFile extends HBaseTestCase {
     Path f = new Path(ROOT_DIR, getName());
     StoreFile.Writer writer = new StoreFile.Writer(fs, f,
         StoreFile.DEFAULT_BLOCKSIZE_SMALL, HFile.DEFAULT_COMPRESSION_ALGORITHM,
-        conf, KeyValue.COMPARATOR, StoreFile.BloomType.ROW, 2000);
+        conf, cacheConf, KeyValue.COMPARATOR, StoreFile.BloomType.ROW, 2000);
 
     bloomWriteRead(writer, fs);
   }
@@ -413,11 +416,11 @@ public class TestStoreFile extends HBaseTestCase {
 
     for (int x : new int[]{0,1}) {
       // write the file
-      Path f = new Path(ROOT_DIR, getName());
+      Path f = new Path(ROOT_DIR, getName() + x);
       StoreFile.Writer writer = new StoreFile.Writer(fs, f,
           StoreFile.DEFAULT_BLOCKSIZE_SMALL,
           HFile.DEFAULT_COMPRESSION_ALGORITHM,
-          conf, KeyValue.COMPARATOR, bt[x], expKeys[x]);
+          conf, cacheConf, KeyValue.COMPARATOR, bt[x], expKeys[x]);
 
       long now = System.currentTimeMillis();
       for (int i = 0; i < rowCount*2; i += 2) { // rows
@@ -434,10 +437,10 @@ public class TestStoreFile extends HBaseTestCase {
       }
       writer.close();
 
-      StoreFile.Reader reader = new StoreFile.Reader(fs, f, null, false, false);
+      StoreFile.Reader reader = new StoreFile.Reader(fs, f, cacheConf);
       reader.loadFileInfo();
       reader.loadBloomfilter();
-      StoreFileScanner scanner = reader.getStoreFileScanner(false, false);
+      StoreFileScanner scanner = reader.getStoreFileScanner(true, true);
       assertEquals(expKeys[x], reader.bloomFilter.getKeyCount());
 
       // check false positives rate
@@ -487,7 +490,7 @@ public class TestStoreFile extends HBaseTestCase {
     // this should not create a bloom because the max keys is too small
     StoreFile.Writer writer = new StoreFile.Writer(fs, f,
         StoreFile.DEFAULT_BLOCKSIZE_SMALL, HFile.DEFAULT_COMPRESSION_ALGORITHM,
-        conf, KeyValue.COMPARATOR, StoreFile.BloomType.ROW, 2000);
+        conf, cacheConf, KeyValue.COMPARATOR, StoreFile.BloomType.ROW, 2000);
     assertFalse(writer.hasBloom());
     writer.close();
     fs.delete(f, true);
@@ -510,7 +513,8 @@ public class TestStoreFile extends HBaseTestCase {
     // because Java can't create a contiguous array > MAX_INT
     writer = new StoreFile.Writer(fs, f,
         StoreFile.DEFAULT_BLOCKSIZE_SMALL, HFile.DEFAULT_COMPRESSION_ALGORITHM,
-        conf, KeyValue.COMPARATOR, StoreFile.BloomType.ROW, Integer.MAX_VALUE);
+        conf, cacheConf, KeyValue.COMPARATOR, StoreFile.BloomType.ROW,
+        Integer.MAX_VALUE);
     assertFalse(writer.hasBloom());
     writer.close();
     fs.delete(f, true);
@@ -602,7 +606,7 @@ public class TestStoreFile extends HBaseTestCase {
         "familyname");
     Path dir = new Path(storedir, "1234567890");
     StoreFile.Writer writer = StoreFile.createWriter(this.fs, dir, 8 * 1024,
-        conf);
+        conf, cacheConf);
 
     List<KeyValue> kvList = getKeyValueSet(timestamps,numRows,
         family, qualifier);
@@ -613,8 +617,8 @@ public class TestStoreFile extends HBaseTestCase {
     writer.appendMetadata(0, false);
     writer.close();
 
-    StoreFile hsf = new StoreFile(this.fs, writer.getPath(), true, conf,
-        StoreFile.BloomType.NONE, false);
+    StoreFile hsf = new StoreFile(this.fs, writer.getPath(), conf, cacheConf,
+        StoreFile.BloomType.NONE);
     StoreFile.Reader reader = hsf.createReader();
     StoreFileScanner scanner = reader.getStoreFileScanner(false, false);
     TreeSet<byte[]> columns = new TreeSet<byte[]>();
@@ -640,14 +644,13 @@ public class TestStoreFile extends HBaseTestCase {
 
   public void testCacheOnWriteEvictOnClose() throws Exception {
     Configuration conf = this.conf;
-    conf.setBoolean("hbase.rs.evictblocksonclose", false);
 
     // Find a home for our files
     Path baseDir = new Path(new Path(this.testDir, "regionname"),
         "twoCOWEOC");
 
     // Grab the block cache and get the initial hit/miss counts
-    BlockCache bc = StoreFile.getBlockCache(conf);
+    BlockCache bc = new CacheConfig(conf).getBlockCache();
     assertNotNull(bc);
     CacheStats cs = bc.getStats();
     long startHit = cs.getHitCount();
@@ -655,11 +658,12 @@ public class TestStoreFile extends HBaseTestCase {
     long startEvicted = cs.getEvictedCount();
 
     // Let's write a StoreFile with three blocks, with cache on write off
-    conf.setBoolean(HFile.CACHE_BLOCKS_ON_WRITE_KEY, false);
+    conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, false);
+    CacheConfig cacheConf = new CacheConfig(conf);
     Path pathCowOff = new Path(baseDir, "123456789");
-    StoreFile.Writer writer = writeStoreFile(conf, pathCowOff, 3);
-    StoreFile hsf = new StoreFile(this.fs, writer.getPath(), true, conf,
-        StoreFile.BloomType.NONE, false);
+    StoreFile.Writer writer = writeStoreFile(conf, cacheConf, pathCowOff, 3);
+    StoreFile hsf = new StoreFile(this.fs, writer.getPath(), conf, cacheConf,
+        StoreFile.BloomType.NONE);
     LOG.debug(hsf.getPath().toString());
 
     // Read this file, we should see 3 misses
@@ -676,11 +680,12 @@ public class TestStoreFile extends HBaseTestCase {
     reader.close();
 
     // Now write a StoreFile with three blocks, with cache on write on
-    conf.setBoolean(HFile.CACHE_BLOCKS_ON_WRITE_KEY, true);
+    conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, true);
+    cacheConf = new CacheConfig(conf);
     Path pathCowOn = new Path(baseDir, "123456788");
-    writer = writeStoreFile(conf, pathCowOn, 3);
-    hsf = new StoreFile(this.fs, writer.getPath(), true, conf,
-        StoreFile.BloomType.NONE, false);
+    writer = writeStoreFile(conf, cacheConf, pathCowOn, 3);
+    hsf = new StoreFile(this.fs, writer.getPath(), conf, cacheConf,
+        StoreFile.BloomType.NONE);
 
     // Read this file, we should see 3 hits
     reader = hsf.createReader();
@@ -695,14 +700,14 @@ public class TestStoreFile extends HBaseTestCase {
     reader.close();
 
     // Let's read back the two files to ensure the blocks exactly match
-    hsf = new StoreFile(this.fs, pathCowOff, true, conf,
-        StoreFile.BloomType.NONE, false);
+    hsf = new StoreFile(this.fs, pathCowOff, conf, cacheConf,
+        StoreFile.BloomType.NONE);
     StoreFile.Reader readerOne = hsf.createReader();
     readerOne.loadFileInfo();
     StoreFileScanner scannerOne = readerOne.getStoreFileScanner(true, true);
     scannerOne.seek(KeyValue.LOWESTKEY);
-    hsf = new StoreFile(this.fs, pathCowOn, true, conf,
-        StoreFile.BloomType.NONE, false);
+    hsf = new StoreFile(this.fs, pathCowOn, conf, cacheConf,
+        StoreFile.BloomType.NONE);
     StoreFile.Reader readerTwo = hsf.createReader();
     readerTwo.loadFileInfo();
     StoreFileScanner scannerTwo = readerTwo.getStoreFileScanner(true, true);
@@ -731,8 +736,9 @@ public class TestStoreFile extends HBaseTestCase {
 
     // Let's close the first file with evict on close turned on
     conf.setBoolean("hbase.rs.evictblocksonclose", true);
-    hsf = new StoreFile(this.fs, pathCowOff, true, conf,
-        StoreFile.BloomType.NONE, false);
+    cacheConf = new CacheConfig(conf);
+    hsf = new StoreFile(this.fs, pathCowOff, conf, cacheConf,
+        StoreFile.BloomType.NONE);
     reader = hsf.createReader();
     reader.close();
 
@@ -744,8 +750,9 @@ public class TestStoreFile extends HBaseTestCase {
 
     // Let's close the second file with evict on close turned off
     conf.setBoolean("hbase.rs.evictblocksonclose", false);
-    hsf = new StoreFile(this.fs, pathCowOn, true, conf,
-        StoreFile.BloomType.NONE, false);
+    cacheConf = new CacheConfig(conf);
+    hsf = new StoreFile(this.fs, pathCowOn, conf, cacheConf,
+        StoreFile.BloomType.NONE);
     reader = hsf.createReader();
     reader.close();
 
@@ -755,8 +762,8 @@ public class TestStoreFile extends HBaseTestCase {
     assertEquals(startEvicted, cs.getEvictedCount());
   }
 
-  private StoreFile.Writer writeStoreFile(Configuration conf, Path path,
-      int numBlocks)
+  private StoreFile.Writer writeStoreFile(Configuration conf,
+      CacheConfig cacheConf, Path path, int numBlocks)
       throws IOException {
     // Let's put ~5 small KVs in each block, so let's make 5*numBlocks KVs
     int numKVs = 5 * numBlocks;
@@ -771,7 +778,7 @@ public class TestStoreFile extends HBaseTestCase {
     int blockSize = totalSize / numBlocks;
     StoreFile.Writer writer = new StoreFile.Writer(fs, path, blockSize,
         HFile.DEFAULT_COMPRESSION_ALGORITHM,
-        conf, KeyValue.COMPARATOR, StoreFile.BloomType.NONE, 2000);
+        conf, cacheConf, KeyValue.COMPARATOR, StoreFile.BloomType.NONE, 2000);
     // We'll write N-1 KVs to ensure we don't write an extra block
     kvs.remove(kvs.size()-1);
     for (KeyValue kv : kvs) {

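One last aside before the remaining files (not part of the patch): the testCacheOnWriteEvictOnClose hunks above suggest that a CacheConfig captures its settings when it is constructed, since the test rebuilds it after every Configuration change. A fragment illustrating that pattern, reusing the conf from the earlier sketches; the evict-on-close key string is copied verbatim from the test.

    // Cache-on-write is now keyed off CacheConfig; rebuild the CacheConfig so
    // the changed Configuration value is actually picked up.
    conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, true);
    CacheConfig cacheConf = new CacheConfig(conf);

    // The same rebuild step applies when toggling evict-on-close.
    conf.setBoolean("hbase.rs.evictblocksonclose", true);
    cacheConf = new CacheConfig(conf);
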
@@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.client.Row;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.hfile.BlockCache;
 import org.apache.hadoop.hbase.io.hfile.BlockCacheColumnFamilySummary;
+import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
@@ -100,7 +101,8 @@ public class TestStoreFileBlockCacheSummary {
     scan(ht, FAMILY);
     scan(ht2, FAMILY);
 
-    BlockCache bc = StoreFile.getBlockCache(TEST_UTIL.getConfiguration());
+    BlockCache bc =
+      new CacheConfig(TEST_UTIL.getConfiguration()).getBlockCache();
     List<BlockCacheColumnFamilySummary> bcs =
       bc.getBlockCacheColumnFamilySummaries(TEST_UTIL.getConfiguration());
     LOG.info("blockCacheSummary: " + bcs);

@@ -42,6 +42,7 @@ import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.regionserver.FlushRequester;
@@ -199,7 +200,8 @@ public class TestWALReplay {
     HLog wal = createWAL(this.conf);
     HRegion region = HRegion.openHRegion(hri, htd, wal, this.conf);
     Path f = new Path(basedir, "hfile");
-    HFile.Writer writer = HFile.getWriterFactory(conf).createWriter(this.fs, f);
+    HFile.Writer writer =
+      HFile.getWriterFactory(conf).createWriter(this.fs, f);
     byte [] family = htd.getFamilies().iterator().next().getName();
     byte [] row = Bytes.toBytes(tableNameStr);
     writer.append(new KeyValue(row, family, family, row));