HBASE-4582 Store.java cleanup (failing TestHeapSize and has warnings) (jgray)
git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1182530 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
parent a548c2084c
commit c2681355b5
CHANGES.txt
@@ -352,6 +352,7 @@ Release 0.92.0 - Unreleased
                and regionservers (Lars H)
    HBASE-4555  TestShell seems passed, but actually errors seen in test output
                file (Mingjie Lai)
+   HBASE-4582  Store.java cleanup (failing TestHeapSize and has warnings)
 
   TESTS
    HBASE-4450  test for number of blocks read: to serve as baseline for expected
Store.java
@@ -59,7 +59,6 @@ import org.apache.hadoop.util.StringUtils;
 
 import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Iterables;
 import com.google.common.collect.Lists;
 
 /**
@@ -133,7 +132,6 @@ public class Store implements HeapSize {
     new CopyOnWriteArraySet<ChangedReadersObserver>();
 
   private final int blocksize;
-  private final boolean blockcache;
   /** Compression algorithm for flush files and minor compaction */
   private final Compression.Algorithm compression;
   /** Compression algorithm for major compaction */
@@ -166,7 +164,6 @@ public class Store implements HeapSize {
     this.region = region;
     this.family = family;
     this.conf = conf;
-    this.blockcache = family.isBlockCacheEnabled();
     this.blocksize = family.getBlocksize();
     this.compression = family.getCompression();
     // avoid overriding compression setting for major compactions if the user
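The dropped blockcache field was only a cached copy of a per-family flag. A minimal sketch, not part of this commit (class and method names are made up for illustration), of reading the same flag on demand from the column family descriptor:

    import org.apache.hadoop.hbase.HColumnDescriptor;

    // Toy helper: the block-cache preference the removed field used to hold is
    // still available from the family descriptor whenever it is needed.
    public class FamilyFlagSketch {
      static boolean shouldCacheBlocks(HColumnDescriptor family) {
        return family.isBlockCacheEnabled();   // same call the old constructor made
      }
    }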
@@ -1348,7 +1345,7 @@ public class Store implements HeapSize {
     this.memstore.getRowKeyAtOrBefore(state);
     // Check if match, if we got a candidate on the asked for 'kv' row.
     // Process each store file. Run through from newest to oldest.
-    for (StoreFile sf : Iterables.reverse(storefiles)) {
+    for (StoreFile sf : Lists.reverse(storefiles)) {
       // Update the candidate keys from the current map file
       rowAtOrBeforeFromStoreFile(sf, state);
     }
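Guava moved this operation from Iterables.reverse(List) to Lists.reverse, deprecating the former, which is plausibly the warning this hunk (and the removed Iterables import above) cleans up. Both return a reversed view rather than a copy. A standalone sketch, not HBase code, of the replacement call:

    import java.util.List;

    import com.google.common.collect.ImmutableList;
    import com.google.common.collect.Lists;

    // Lists.reverse returns a reversed *view* of the list, so iterating it visits
    // elements newest-to-oldest without copying the underlying list.
    public class ReverseViewDemo {
      public static void main(String[] args) {
        List<String> storefiles = ImmutableList.of("oldest", "middle", "newest");
        for (String sf : Lists.reverse(storefiles)) {
          System.out.println(sf);   // prints: newest, middle, oldest
        }
      }
    }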
@@ -1387,7 +1384,7 @@ public class Store implements HeapSize {
       firstOnRow = new KeyValue(lastKV.getRow(), HConstants.LATEST_TIMESTAMP);
     }
     // Get a scanner that caches blocks and that uses pread.
-    HFileScanner scanner = r.getScanner(true, true);
+    HFileScanner scanner = r.getHFileReader().getScanner(true, true);
     // Seek scanner. If can't seek it, return.
     if (!seekToScanner(scanner, firstOnRow, firstKV)) return;
     // If we found candidate on firstOnRow, just return. THIS WILL NEVER HAPPEN!
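The two boolean arguments ask for a block-caching, positional-read scanner. A rough, illustrative sketch of the seek step that seekToScanner performs next, assuming the HFileScanner interface of this era (seekTo(byte[]), seekTo(), getKeyValue()); the helper class and method are made up:

    import java.io.IOException;

    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.io.hfile.HFileScanner;

    // Position the scanner at or before a target key, falling back to the first
    // key of the file when the target sorts before everything stored in it.
    public class SeekSketch {
      static KeyValue seekAtOrBefore(HFileScanner scanner, KeyValue firstOnRow)
          throws IOException {
        int result = scanner.seekTo(firstOnRow.getKey()); // < 0: target precedes first key
        if (result < 0 && !scanner.seekTo()) {
          return null;                                    // empty file, nothing to return
        }
        return scanner.getKeyValue();                     // current position after the seek
      }
    }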
@@ -1793,7 +1790,7 @@ public class Store implements HeapSize {
   public static final long FIXED_OVERHEAD = ClassSize.align(
       ClassSize.OBJECT + (17 * ClassSize.REFERENCE) +
       (7 * Bytes.SIZEOF_LONG) + (1 * Bytes.SIZEOF_DOUBLE) +
-      (6 * Bytes.SIZEOF_INT) + (2 * Bytes.SIZEOF_BOOLEAN));
+      (7 * Bytes.SIZEOF_INT) + (1 * Bytes.SIZEOF_BOOLEAN));
 
   public static final long DEEP_OVERHEAD = ClassSize.align(FIXED_OVERHEAD +
       ClassSize.OBJECT + ClassSize.REENTRANT_LOCK +
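The failing TestHeapSize named in the summary comes from this hand-maintained constant drifting out of sync with Store's actual fields (for example, the boolean blockcache field removed above). A minimal sketch of the style of check the test performs, assuming ClassSize.estimateBase as in HBase's util package; the standalone class name is made up:

    import org.apache.hadoop.hbase.regionserver.Store;
    import org.apache.hadoop.hbase.util.ClassSize;

    // The constant must match what ClassSize derives from Store's field layout
    // via reflection; the real TestHeapSize covers many classes the same way.
    public class HeapSizeCheckSketch {
      public static void main(String[] args) {
        long estimated = ClassSize.estimateBase(Store.class, false);
        if (estimated != Store.FIXED_OVERHEAD) {
          throw new AssertionError("Store.FIXED_OVERHEAD is " + Store.FIXED_OVERHEAD
              + " but ClassSize estimates " + estimated);
        }
      }
    }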
StoreFile.java
@@ -46,7 +46,6 @@ import org.apache.hadoop.hbase.KeyValue.KVComparator;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.HalfStoreFileReader;
 import org.apache.hadoop.hbase.io.Reference;
-import org.apache.hadoop.hbase.io.hfile.BlockCache;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.Compression;
 import org.apache.hadoop.hbase.io.hfile.HFile;
@@ -121,9 +120,6 @@ public class StoreFile {
   // Need to make it 8k for testing.
   public static final int DEFAULT_BLOCKSIZE_SMALL = 8 * 1024;
 
-
-  private static BlockCache hfileBlockCache = null;
-
   private final FileSystem fs;
 
   // This file's path.
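With block caching governed by the CacheConfig handed to StoreFile (see the constructor hunk below), the static hfileBlockCache field is dead code. A hedged sketch, assuming the CacheConfig(Configuration) constructor and getBlockCache() accessor of this era; the helper class is illustrative only:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.io.hfile.BlockCache;
    import org.apache.hadoop.hbase.io.hfile.CacheConfig;

    // Callers obtain the block cache through the CacheConfig they are given
    // rather than through a static field on StoreFile.
    public class CacheConfigSketch {
      static BlockCache blockCacheFor(Configuration conf) {
        CacheConfig cacheConf = new CacheConfig(conf);
        return cacheConf.getBlockCache();
      }
    }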
@@ -173,7 +169,6 @@ public class StoreFile {
 
   // Used making file ids.
   private final static Random rand = new Random();
-  private final Configuration conf;
 
   /**
    * Bloom filter type specified in column family configuration. Does not
@@ -206,7 +201,6 @@ public class StoreFile {
       final CacheConfig cacheConf,
       final BloomType cfBloomType)
   throws IOException {
-    this.conf = conf;
     this.fs = fs;
     this.path = p;
    this.cacheConf = cacheConf;
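The constructor still accepts conf, but with the field hunk above dropping the Configuration member, the assignment goes too; a field that is written once and never read afterwards is exactly the kind of "has warnings" item the summary refers to. A toy illustration, not HBase code:

    // A private field assigned in the constructor but never read afterwards is
    // flagged as unused by IDEs and static analysis, and in a HeapSize-implementing
    // class it also inflates the hand-counted fixed overhead.
    public class UnusedFieldSketch {
      private final Object conf;        // never read after assignment

      public UnusedFieldSketch(Object conf) {
        this.conf = conf;
      }
    }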