From c2681355b5fa10e2265db4accf3682a9674ee62a Mon Sep 17 00:00:00 2001
From: Jonathan Gray
Date: Wed, 12 Oct 2011 19:08:50 +0000
Subject: [PATCH] HBASE-4582 Store.java cleanup (failing TestHeapSize and has warnings) (jgray)

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1182530 13f79535-47bb-0310-9956-ffa450edef68
---
 CHANGES.txt                                          |  1 +
 .../org/apache/hadoop/hbase/regionserver/Store.java  | 13 +++++--------
 .../apache/hadoop/hbase/regionserver/StoreFile.java  |  6 ------
 3 files changed, 6 insertions(+), 14 deletions(-)

diff --git a/CHANGES.txt b/CHANGES.txt
index c72305939e9..7ab218ec292 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -352,6 +352,7 @@ Release 0.92.0 - Unreleased
                and regionservers (Lars H)
    HBASE-4555  TestShell seems passed, but actually errors seen in test output
                file (Mingjie Lai)
+   HBASE-4582  Store.java cleanup (failing TestHeapSize and has warnings)
 
   TESTS
    HBASE-4450  test for number of blocks read: to serve as baseline for expected
diff --git a/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java b/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
index 48e73a23f27..3fe9530f665 100644
--- a/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
+++ b/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
@@ -59,7 +59,6 @@ import org.apache.hadoop.util.StringUtils;
 
 import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Iterables;
 import com.google.common.collect.Lists;
 
 /**
@@ -133,7 +132,6 @@ public class Store implements HeapSize {
     new CopyOnWriteArraySet<ChangedReadersObserver>();
 
   private final int blocksize;
-  private final boolean blockcache;
   /** Compression algorithm for flush files and minor compaction */
   private final Compression.Algorithm compression;
   /** Compression algorithm for major compaction */
@@ -166,7 +164,6 @@ public class Store implements HeapSize {
     this.region = region;
     this.family = family;
     this.conf = conf;
-    this.blockcache = family.isBlockCacheEnabled();
     this.blocksize = family.getBlocksize();
     this.compression = family.getCompression();
     // avoid overriding compression setting for major compactions if the user
@@ -1348,7 +1345,7 @@ public class Store implements HeapSize {
       this.memstore.getRowKeyAtOrBefore(state);
       // Check if match, if we got a candidate on the asked for 'kv' row.
       // Process each store file.  Run through from newest to oldest.
-      for (StoreFile sf : Iterables.reverse(storefiles)) {
+      for (StoreFile sf : Lists.reverse(storefiles)) {
         // Update the candidate keys from the current map file
         rowAtOrBeforeFromStoreFile(sf, state);
       }
@@ -1387,7 +1384,7 @@ public class Store implements HeapSize {
         firstOnRow = new KeyValue(lastKV.getRow(), HConstants.LATEST_TIMESTAMP);
       }
       // Get a scanner that caches blocks and that uses pread.
-      HFileScanner scanner = r.getScanner(true, true);
+      HFileScanner scanner = r.getHFileReader().getScanner(true, true);
       // Seek scanner.  If can't seek it, return.
       if (!seekToScanner(scanner, firstOnRow, firstKV)) return;
       // If we found candidate on firstOnRow, just return. THIS WILL NEVER HAPPEN!
@@ -1473,7 +1470,7 @@ public class Store implements HeapSize {
           return false;
         }
       }
-      
+
       return true;
     } finally {
       this.lock.readLock().unlock();
@@ -1508,7 +1505,7 @@ public class Store implements HeapSize {
         LOG.warn("Storefile " + sf + " Reader is null");
         continue;
       }
-      
+
       long size = r.length();
       if (size > maxSize) {
         // This is the largest one so far
@@ -1793,7 +1790,7 @@ public class Store implements HeapSize {
   public static final long FIXED_OVERHEAD = ClassSize.align(
       ClassSize.OBJECT + (17 * ClassSize.REFERENCE) +
       (7 * Bytes.SIZEOF_LONG) + (1 * Bytes.SIZEOF_DOUBLE) +
-      (6 * Bytes.SIZEOF_INT) + (2 * Bytes.SIZEOF_BOOLEAN));
+      (7 * Bytes.SIZEOF_INT) + (1 * Bytes.SIZEOF_BOOLEAN));
 
   public static final long DEEP_OVERHEAD = ClassSize.align(FIXED_OVERHEAD +
       ClassSize.OBJECT + ClassSize.REENTRANT_LOCK +
diff --git a/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java b/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
index 667a3476d5d..384188d3f5c 100644
--- a/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
+++ b/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
@@ -46,7 +46,6 @@ import org.apache.hadoop.hbase.KeyValue.KVComparator;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.HalfStoreFileReader;
 import org.apache.hadoop.hbase.io.Reference;
-import org.apache.hadoop.hbase.io.hfile.BlockCache;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.Compression;
 import org.apache.hadoop.hbase.io.hfile.HFile;
@@ -121,9 +120,6 @@ public class StoreFile {
   // Need to make it 8k for testing.
   public static final int DEFAULT_BLOCKSIZE_SMALL = 8 * 1024;
 
-
-  private static BlockCache hfileBlockCache = null;
-
   private final FileSystem fs;
 
   // This file's path.
@@ -173,7 +169,6 @@ public class StoreFile {
 
   // Used making file ids.
   private final static Random rand = new Random();
-  private final Configuration conf;
 
   /**
    * Bloom filter type specified in column family configuration. Does not
@@ -206,7 +201,6 @@ public class StoreFile {
       final CacheConfig cacheConf,
       final BloomType cfBloomType)
       throws IOException {
-    this.conf = conf;
     this.fs = fs;
     this.path = p;
     this.cacheConf = cacheConf;
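
Note on the Iterables.reverse -> Lists.reverse change above: Guava deprecated
Iterables.reverse(List) in favor of Lists.reverse(List), which returns a
constant-time reversed *view* of the list rather than a copy, so walking the
store files newest-to-oldest stays allocation-free. A minimal, self-contained
sketch of the behavior (the class name ReverseViewDemo is hypothetical;
requires Guava on the classpath):

    import java.util.List;

    import com.google.common.collect.ImmutableList;
    import com.google.common.collect.Lists;

    public class ReverseViewDemo {
      public static void main(String[] args) {
        // Oldest-to-newest, the order Store keeps its store file list in.
        List<String> storeFiles = ImmutableList.of("oldest", "middle", "newest");
        // Lists.reverse returns a view backed by the original list;
        // no elements are copied, and the view reflects later changes.
        for (String f : Lists.reverse(storeFiles)) {
          System.out.println(f);  // prints newest, middle, oldest
        }
      }
    }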
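
Note on the FIXED_OVERHEAD change: the constant is a hand-maintained shallow
size tally (object header, reference count, primitive fields) that TestHeapSize
compares against a reflection-based estimate, which is why a stale count fails
the test; dropping the blockcache boolean and correcting the int count brings
the tally back in line with the class. A sketch of that style of check, using
ClassSize.estimateBase(Class, boolean) from org.apache.hadoop.hbase.util (the
wrapper class name HeapSizeCheck is hypothetical):

    import org.apache.hadoop.hbase.regionserver.Store;
    import org.apache.hadoop.hbase.util.ClassSize;

    public class HeapSizeCheck {
      public static void main(String[] args) {
        // Reflection-based shallow size: header + references + primitives,
        // aligned the same way ClassSize.align is applied in FIXED_OVERHEAD.
        long expected = ClassSize.estimateBase(Store.class, false);
        long actual = Store.FIXED_OVERHEAD;
        if (expected != actual) {
          // Re-run with debug=true to log the per-field breakdown.
          ClassSize.estimateBase(Store.class, true);
          throw new AssertionError("expected=" + expected + ", actual=" + actual);
        }
      }
    }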