From 31564554620d92802c6c45a6e0df8bb05c3b0c37 Mon Sep 17 00:00:00 2001 From: Zhihong Yu Date: Tue, 14 Aug 2012 22:34:18 +0000 Subject: [PATCH] HBASE-6569 Extract HStore interface from Store (Jesse Yates) git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1373153 13f79535-47bb-0310-9956-ffa450edef68 --- .../regionserver/CompactSplitThread.java | 16 +- .../regionserver/CompactionRequestor.java | 5 +- .../ConstantSizeRegionSplitPolicy.java | 2 +- .../hadoop/hbase/regionserver/HRegion.java | 66 ++-- .../hbase/regionserver/HRegionServer.java | 14 +- .../hadoop/hbase/regionserver/HStore.java | 287 ++++++++++++++++++ ...creasingToUpperBoundRegionSplitPolicy.java | 2 +- .../hadoop/hbase/regionserver/MemStore.java | 2 +- .../hbase/regionserver/MemStoreFlusher.java | 2 +- .../hbase/regionserver/RegionSplitPolicy.java | 4 +- .../hadoop/hbase/regionserver/Store.java | 238 ++++----------- .../TestZooKeeperTableArchiveClient.java | 7 +- .../hbase/client/TestFromClientSide.java | 5 +- .../hbase/regionserver/CompactionTool.java | 2 +- .../regionserver/TestAtomicOperation.java | 2 +- .../hbase/regionserver/TestCompaction.java | 32 +- .../hbase/regionserver/TestHRegion.java | 26 +- .../regionserver/TestRegionServerMetrics.java | 2 +- .../hbase/regionserver/wal/TestWALReplay.java | 5 +- .../hbase/util/HFileArchiveTestingUtil.java | 5 +- 20 files changed, 452 insertions(+), 272 deletions(-) create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java index 799a64aea37..12785ed16d9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java @@ -158,7 +158,7 @@ public class CompactSplitThread implements CompactionRequestor { public synchronized boolean requestSplit(final HRegion r) { // don't split regions that are blocking - if (shouldSplitRegion() && r.getCompactPriority() >= Store.PRIORITY_USER) { + if (shouldSplitRegion() && r.getCompactPriority() >= HStore.PRIORITY_USER) { byte[] midKey = r.checkSplit(); if (midKey != null) { requestSplit(r, midKey); @@ -186,19 +186,19 @@ public class CompactSplitThread implements CompactionRequestor { public synchronized void requestCompaction(final HRegion r, final String why) throws IOException { - for(Store s : r.getStores().values()) { - requestCompaction(r, s, why, Store.NO_PRIORITY); + for (HStore s : r.getStores().values()) { + requestCompaction(r, s, why, HStore.NO_PRIORITY); } } - public synchronized void requestCompaction(final HRegion r, final Store s, + public synchronized void requestCompaction(final HRegion r, final HStore s, final String why) throws IOException { - requestCompaction(r, s, why, Store.NO_PRIORITY); + requestCompaction(r, s, why, HStore.NO_PRIORITY); } public synchronized void requestCompaction(final HRegion r, final String why, int p) throws IOException { - for(Store s : r.getStores().values()) { + for (HStore s : r.getStores().values()) { requestCompaction(r, s, why, p); } } @@ -209,7 +209,7 @@ public class CompactSplitThread implements CompactionRequestor { * @param why Why compaction requested -- used in debug messages * @param priority override the default priority (NO_PRIORITY == decide) */ - public synchronized void requestCompaction(final HRegion r, final Store s, + public 
synchronized void requestCompaction(final HRegion r, final HStore s, final String why, int priority) throws IOException { if (this.server.isStopped()) { return; @@ -217,7 +217,7 @@ public class CompactSplitThread implements CompactionRequestor { CompactionRequest cr = s.requestCompaction(priority); if (cr != null) { cr.setServer(server); - if (priority != Store.NO_PRIORITY) { + if (priority != HStore.NO_PRIORITY) { cr.setPriority(priority); } ThreadPoolExecutor pool = s.throttleCompaction(cr.getSize()) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionRequestor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionRequestor.java index ad0bb987d00..8d957fb30a6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionRequestor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionRequestor.java @@ -37,7 +37,8 @@ public interface CompactionRequestor { * @param why Why compaction was requested -- used in debug messages * @throws IOException */ - public void requestCompaction(final HRegion r, final Store s, final String why) throws IOException; + public void requestCompaction(final HRegion r, final HStore s, final String why) + throws IOException; /** * @param r Region to compact @@ -54,7 +55,7 @@ public interface CompactionRequestor { * @param pri Priority of this compaction. minHeap. <=0 is critical * @throws IOException */ - public void requestCompaction(final HRegion r, final Store s, + public void requestCompaction(final HRegion r, final HStore s, final String why, int pri) throws IOException; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ConstantSizeRegionSplitPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ConstantSizeRegionSplitPolicy.java index a35f2858275..db6e6b5cecd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ConstantSizeRegionSplitPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ConstantSizeRegionSplitPolicy.java @@ -51,7 +51,7 @@ public class ConstantSizeRegionSplitPolicy extends RegionSplitPolicy { boolean force = region.shouldForceSplit(); boolean foundABigStore = false; - for (Store store : region.getStores().values()) { + for (HStore store : region.getStores().values()) { // If any of the stores are unable to split (eg they contain reference files) // then don't split if ((!store.canSplit())) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index 0ab5bf875ca..adcf8306057 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -203,8 +203,8 @@ public class HRegion implements HeapSize { // , Writable{ private final AtomicInteger lockIdGenerator = new AtomicInteger(1); static private Random rand = new Random(); - protected final Map stores = - new ConcurrentSkipListMap(Bytes.BYTES_RAWCOMPARATOR); + protected final Map stores = new ConcurrentSkipListMap( + Bytes.BYTES_RAWCOMPARATOR); // Registered region protocol handlers private ClassToInstanceMap @@ -642,7 +642,7 @@ public class HRegion implements HeapSize { // , Writable{ * @return True if this region has references. 
*/ public boolean hasReferences() { - for (Store store : this.stores.values()) { + for (HStore store : this.stores.values()) { for (StoreFile sf : store.getStorefiles()) { // Found a reference, return. if (sf.isReference()) return true; @@ -660,7 +660,7 @@ public class HRegion implements HeapSize { // , Writable{ HDFSBlocksDistribution hdfsBlocksDistribution = new HDFSBlocksDistribution(); synchronized (this.stores) { - for (Store store : this.stores.values()) { + for (HStore store : this.stores.values()) { for (StoreFile sf : store.getStorefiles()) { HDFSBlocksDistribution storeFileBlocksDistribution = sf.getHDFSBlockDistribution(); @@ -977,7 +977,7 @@ public class HRegion implements HeapSize { // , Writable{ storeCloserThreadPool); // close each store in parallel - for (final Store store : stores.values()) { + for (final HStore store : stores.values()) { completionService .submit(new Callable>() { public ImmutableList call() throws IOException { @@ -1173,7 +1173,7 @@ public class HRegion implements HeapSize { // , Writable{ /** @return returns size of largest HStore. */ public long getLargestHStoreSize() { long size = 0; - for (Store h: stores.values()) { + for (HStore h : stores.values()) { long storeSize = h.getSize(); if (storeSize > size) { size = storeSize; @@ -1205,7 +1205,7 @@ public class HRegion implements HeapSize { // , Writable{ } void triggerMajorCompaction() { - for (Store h: stores.values()) { + for (HStore h : stores.values()) { h.triggerMajorCompaction(); } } @@ -1232,7 +1232,7 @@ public class HRegion implements HeapSize { // , Writable{ * @throws IOException e */ public void compactStores() throws IOException { - for(Store s : getStores().values()) { + for (HStore s : getStores().values()) { CompactionRequest cr = s.requestCompaction(); if(cr != null) { try { @@ -1500,7 +1500,7 @@ public class HRegion implements HeapSize { // , Writable{ wal.startCacheFlush(this.regionInfo.getEncodedNameAsBytes()); completeSequenceId = this.getCompleteCacheFlushSequenceId(sequenceId); - for (Store s : stores.values()) { + for (HStore s : stores.values()) { storeFlushers.add(s.getStoreFlusher(completeSequenceId)); } @@ -1658,7 +1658,7 @@ public class HRegion implements HeapSize { // , Writable{ startRegionOperation(); this.readRequestsCount.increment(); try { - Store store = getStore(family); + HStore store = getStore(family); // get the closest key. (HStore.getRowKeyAtOrBefore can return null) KeyValue key = store.getRowKeyAtOrBefore(row); Result result = null; @@ -2662,7 +2662,7 @@ public class HRegion implements HeapSize { // , Writable{ byte[] family = e.getKey(); List edits = e.getValue(); - Store store = getStore(family); + HStore store = getStore(family); for (KeyValue kv: edits) { kv.setMemstoreTS(localizedWriteEntry.getWriteNumber()); size += store.add(kv); @@ -2702,7 +2702,7 @@ public class HRegion implements HeapSize { // , Writable{ // Remove those keys from the memstore that matches our // key's (row, cf, cq, timestamp, memstoreTS). The interesting part is // that even the memstoreTS has to match for keys that will be rolleded-back. 
- Store store = getStore(family); + HStore store = getStore(family); for (KeyValue kv: edits) { store.rollback(kv); kvsRolledback++; @@ -2918,7 +2918,7 @@ public class HRegion implements HeapSize { // , Writable{ long editsCount = 0; long intervalEdits = 0; HLog.Entry entry; - Store store = null; + HStore store = null; boolean reported_once = false; try { @@ -3056,7 +3056,7 @@ public class HRegion implements HeapSize { // , Writable{ * @param kv KeyValue to add. * @return True if we should flush. */ - protected boolean restoreEdit(final Store s, final KeyValue kv) { + protected boolean restoreEdit(final HStore s, final KeyValue kv) { long kvSize = s.add(kv); if (this.rsAccounting != null) { rsAccounting.addAndGetRegionReplayEditsSize(this.regionInfo.getRegionName(), kvSize); @@ -3091,11 +3091,11 @@ public class HRegion implements HeapSize { // , Writable{ * @return Store that goes with the family on passed column. * TODO: Make this lookup faster. */ - public Store getStore(final byte [] column) { + public HStore getStore(final byte[] column) { return this.stores.get(column); } - public Map getStores() { + public Map getStores() { return this.stores; } @@ -3111,7 +3111,7 @@ public class HRegion implements HeapSize { // , Writable{ List storeFileNames = new ArrayList(); synchronized(closeLock) { for(byte[] column : columns) { - Store store = this.stores.get(column); + HStore store = this.stores.get(column); if (store == null) { throw new IllegalArgumentException("No column family : " + new String(column) + " available"); @@ -3331,7 +3331,7 @@ public class HRegion implements HeapSize { // , Writable{ byte[] familyName = p.getFirst(); String path = p.getSecond(); - Store store = getStore(familyName); + HStore store = getStore(familyName); if (store == null) { IOException ioe = new DoNotRetryIOException( "No such column family " + Bytes.toStringBinary(familyName)); @@ -3373,7 +3373,7 @@ public class HRegion implements HeapSize { // , Writable{ for (Pair p : familyPaths) { byte[] familyName = p.getFirst(); String path = p.getSecond(); - Store store = getStore(familyName); + HStore store = getStore(familyName); try { store.bulkLoadHFile(path); } catch (IOException ioe) { @@ -3474,7 +3474,7 @@ public class HRegion implements HeapSize { // , Writable{ for (Map.Entry> entry : scan.getFamilyMap().entrySet()) { - Store store = stores.get(entry.getKey()); + HStore store = stores.get(entry.getKey()); KeyValueScanner scanner = store.getScanner(scan, entry.getValue()); scanners.add(scanner); } @@ -4252,7 +4252,7 @@ public class HRegion implements HeapSize { // , Writable{ * @throws IOException */ boolean isMajorCompaction() throws IOException { - for (Store store: this.stores.values()) { + for (HStore store : this.stores.values()) { if (store.isMajorCompaction()) { return true; } @@ -4638,7 +4638,7 @@ public class HRegion implements HeapSize { // , Writable{ boolean flush = false; WALEdit walEdits = null; List allKVs = new ArrayList(append.size()); - Map> tempMemstore = new HashMap>(); + Map> tempMemstore = new HashMap>(); long before = EnvironmentEdgeManager.currentTimeMillis(); long size = 0; long txid = 0; @@ -4655,7 +4655,7 @@ public class HRegion implements HeapSize { // , Writable{ for (Map.Entry> family : append.getFamilyMap() .entrySet()) { - Store store = stores.get(family.getKey()); + HStore store = stores.get(family.getKey()); List kvs = new ArrayList(family.getValue().size()); // Get previous values for all columns in this family @@ -4738,8 +4738,8 @@ public class HRegion implements HeapSize { 
// , Writable{ } //Actually write to Memstore now - for (Map.Entry> entry : tempMemstore.entrySet()) { - Store store = entry.getKey(); + for (Map.Entry> entry : tempMemstore.entrySet()) { + HStore store = entry.getKey(); size += store.upsert(entry.getValue()); allKVs.addAll(entry.getValue()); } @@ -4791,7 +4791,7 @@ public class HRegion implements HeapSize { // , Writable{ boolean flush = false; WALEdit walEdits = null; List allKVs = new ArrayList(increment.numColumns()); - Map> tempMemstore = new HashMap>(); + Map> tempMemstore = new HashMap>(); long before = EnvironmentEdgeManager.currentTimeMillis(); long size = 0; long txid = 0; @@ -4808,7 +4808,7 @@ public class HRegion implements HeapSize { // , Writable{ for (Map.Entry> family : increment.getFamilyMap().entrySet()) { - Store store = stores.get(family.getKey()); + HStore store = stores.get(family.getKey()); List kvs = new ArrayList(family.getValue().size()); // Get previous values for all columns in this family @@ -4860,8 +4860,8 @@ public class HRegion implements HeapSize { // , Writable{ } //Actually write to Memstore now - for (Map.Entry> entry : tempMemstore.entrySet()) { - Store store = entry.getKey(); + for (Map.Entry> entry : tempMemstore.entrySet()) { + HStore store = entry.getKey(); size += store.upsert(entry.getValue()); allKVs.addAll(entry.getValue()); } @@ -4918,7 +4918,7 @@ public class HRegion implements HeapSize { // , Writable{ Integer lid = obtainRowLock(row); this.updatesLock.readLock().lock(); try { - Store store = stores.get(family); + HStore store = stores.get(family); // Get the old value: Get get = new Get(row); @@ -5029,7 +5029,7 @@ public class HRegion implements HeapSize { // , Writable{ @Override public long heapSize() { long heapSize = DEEP_OVERHEAD; - for(Store store : this.stores.values()) { + for (HStore store : this.stores.values()) { heapSize += store.heapSize(); } // this does not take into account row locks, recent flushes, mvcc entries @@ -5274,7 +5274,7 @@ public class HRegion implements HeapSize { // , Writable{ */ public int getCompactPriority() { int count = Integer.MAX_VALUE; - for(Store store : stores.values()) { + for (HStore store : stores.values()) { count = Math.min(count, store.getCompactPriority()); } return count; @@ -5286,7 +5286,7 @@ public class HRegion implements HeapSize { // , Writable{ * @return true if any store has too many store files */ public boolean needsCompaction() { - for(Store store : stores.values()) { + for (HStore store : stores.values()) { if(store.needsCompaction()) { return true; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index ce945ac6ac4..665f55a8492 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -1142,7 +1142,7 @@ public class HRegionServer implements ClientProtocol, long currentCompactedKVs = 0; synchronized (r.stores) { stores += r.stores.size(); - for (Store store : r.stores.values()) { + for (HStore store : r.stores.values()) { storefiles += store.getStorefilesCount(); storeUncompressedSizeMB += (int) (store.getStoreSizeUncompressed() / 1024 / 1024); @@ -1228,7 +1228,7 @@ public class HRegionServer implements ClientProtocol, for (HRegion r : this.instance.onlineRegions.values()) { if (r == null) continue; - for (Store s : r.getStores().values()) { + for (HStore s : 
r.getStores().values()) { try { if (s.needsCompaction()) { // Queue a compaction. Will recognize if major is needed. @@ -1369,8 +1369,8 @@ public class HRegionServer implements ClientProtocol, writeRequestsCount += r.writeRequestsCount.get(); synchronized (r.stores) { stores += r.stores.size(); - for (Map.Entry ee : r.stores.entrySet()) { - final Store store = ee.getValue(); + for (Map.Entry ee : r.stores.entrySet()) { + final HStore store = ee.getValue(); final SchemaMetrics schemaMetrics = store.getSchemaMetrics(); { @@ -1644,7 +1644,7 @@ public class HRegionServer implements ClientProtocol, LOG.info("Post open deploy tasks for region=" + r.getRegionNameAsString() + ", daughter=" + daughter); // Do checks to see if we need to compact (references or too many files) - for (Store s : r.getStores().values()) { + for (HStore s : r.getStores().values()) { if (s.hasReferences() || s.needsCompaction()) { getCompactionRequester().requestCompaction(r, s, "Opening Region"); } @@ -2009,7 +2009,7 @@ public class HRegionServer implements ClientProtocol, int storefileIndexSizeMB = 0; synchronized (r.stores) { stores += r.stores.size(); - for (Store store : r.stores.values()) { + for (HStore store : r.stores.values()) { storefiles += store.getStorefilesCount(); storefileSizeMB += (int) (store.getStorefilesSize() / 1024 / 1024); storefileIndexSizeMB += (int) (store.getStorefilesIndexSize() / 1024 / 1024); @@ -3590,7 +3590,7 @@ public class HRegionServer implements ClientProtocol, region.getRegionNameAsString()); compactSplitThread.requestCompaction(region, "User-triggered " + (major ? "major " : "") + "compaction", - Store.PRIORITY_USER); + HStore.PRIORITY_USER); return CompactRegionResponse.newBuilder().build(); } catch (IOException ie) { throw new ServiceException(ie); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java new file mode 100644 index 00000000000..20a38b053ec --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java @@ -0,0 +1,287 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.regionserver; + +import java.io.IOException; +import java.util.List; +import java.util.NavigableSet; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.io.HeapSize; +import org.apache.hadoop.hbase.io.hfile.CacheConfig; +import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder; +import org.apache.hadoop.hbase.regionserver.compactions.CompactionProgress; +import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest; +import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics.SchemaAware; + +import com.google.common.collect.ImmutableList; + +/** + * Interface for objects that hold a column family in a Region. It's a memstore and a set of zero or + * more StoreFiles, which stretch backwards over time. + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public interface HStore extends SchemaAware, HeapSize { + + /* The default priority for user-specified compaction requests. * The user gets top priority unless we have blocking compactions. (Pri <= 0) */ + public static final int PRIORITY_USER = 1; + public static final int NO_PRIORITY = Integer.MIN_VALUE; + + // General Accessors + public KeyValue.KVComparator getComparator(); + + public List getStorefiles(); + + /** + * Close all the readers. We don't need to worry about subsequent requests because the HRegion + * holds a write lock that will prevent any more reads or writes. + * @return the {@link StoreFile StoreFiles} that were previously being used. + * @throws IOException on failure + */ + public ImmutableList close() throws IOException; + + /** + * Return a scanner for both the memstore and the HStore files. Assumes we are not in a + * compaction. + * @param scan Scan to apply when scanning the stores + * @param targetCols columns to scan + * @return a scanner over the current key values + * @throws IOException on failure + */ + public KeyValueScanner getScanner(Scan scan, final NavigableSet targetCols) + throws IOException; + + /** + * Updates the value for the given row/family/qualifier. This function will always be seen as + * atomic by other readers because it only puts a single KV to memstore. Thus no read/write + * control necessary. + * @param row row to update + * @param f family to update + * @param qualifier qualifier to update + * @param newValue the new value to set into memstore + * @return memstore size delta + * @throws IOException + */ + public long updateColumnValue(byte[] row, byte[] f, byte[] qualifier, long newValue) + throws IOException; + + /** + * Adds or replaces the specified KeyValues. + *
<p>
+ * For each KeyValue specified, if a cell with the same row, family, and qualifier exists in + * MemStore, it will be replaced. Otherwise, it will just be inserted to MemStore. + *
<p>
+ * This operation is atomic on each KeyValue (row/family/qualifier) but not necessarily atomic + * across all of them. + * @param kvs + * @return memstore size delta + * @throws IOException + */ + public long upsert(Iterable kvs) throws IOException; + + /** + * Adds a value to the memstore + * @param kv + * @return memstore size delta + */ + public long add(KeyValue kv); + + /** + * Removes a kv from the memstore. The KeyValue is removed only if its key & memstoreTS match the + * key & memstoreTS value of the kv parameter. + * @param kv + */ + public void rollback(final KeyValue kv); + + /** + * Find the key that matches row exactly, or the one that immediately precedes it. WARNING: + * Only use this method on a table where writes occur with strictly increasing timestamps. This + * method assumes this pattern of writes in order to make it reasonably performant. Also our + * search is dependent on the axiom that deletes are for cells that are in the container that + * follows whether a memstore snapshot or a storefile, not for the current container: i.e. we'll + * see deletes before we come across cells we are to delete. Presumption is that the + * memstore#kvset is processed before memstore#snapshot and so on. + * @param row The row key of the targeted row. + * @return Found keyvalue or null if none found. + * @throws IOException + */ + public KeyValue getRowKeyAtOrBefore(final byte[] row) throws IOException; + + // Compaction oriented methods + + public boolean throttleCompaction(long compactionSize); + + /** + * getter for CompactionProgress object + * @return CompactionProgress object; can be null + */ + public CompactionProgress getCompactionProgress(); + + public CompactionRequest requestCompaction() throws IOException; + + public CompactionRequest requestCompaction(int priority) throws IOException; + + public void finishRequest(CompactionRequest cr); + + /** + * @return true if we should run a major compaction. + */ + public boolean isMajorCompaction() throws IOException; + + public void triggerMajorCompaction(); + + /** + * See if there are too many store files in this store + * @return true if number of store files is greater than the number defined in minFilesToCompact + */ + public boolean needsCompaction(); + + public int getCompactPriority(); + + /** + * @param priority priority to check against. When priority is {@link HStore#PRIORITY_USER}, + * {@link HStore#PRIORITY_USER} is returned. + * @return The priority that this store has in the compaction queue. + */ + public int getCompactPriority(int priority); + + public StoreFlusher getStoreFlusher(long cacheFlushId); + + // Split oriented methods + + public boolean canSplit(); + + /** + * Determines if Store should be split + * @return byte[] if store should be split, null otherwise. + */ + public byte[] getSplitPoint(); + + // Bulk Load methods + + /** + * This throws a WrongRegionException if the HFile does not fit in this region, or an + * InvalidHFileException if the HFile is not valid. + */ + public void assertBulkLoadHFileOk(Path srcPath) throws IOException; + + /** + * This method should only be called from HRegion. It is assumed that the ranges of values in the + * HFile fit within the store's assigned region. 
(assertBulkLoadHFileOk checks this) + */ + public void bulkLoadHFile(String srcPathStr) throws IOException; + + // General accessors into the state of the store + // TODO abstract some of this out into a metrics class + + /** + * @return true if the store has any underlying reference files to older HFiles + */ + public boolean hasReferences(); + + /** + * @return The size of this store's memstore, in bytes + */ + public long getMemStoreSize(); + + public HColumnDescriptor getFamily(); + + /** + * @return The maximum memstoreTS in all store files. + */ + public long getMaxMemstoreTS(); + + /** + * @return the data block encoder + */ + public HFileDataBlockEncoder getDataBlockEncoder(); + + /** + * @return the number of files in this store + */ + public int getNumberOfStoreFiles(); + + /** @return aggregate size of all HStores used in the last compaction */ + public long getLastCompactSize(); + + /** @return aggregate size of HStore */ + public long getSize(); + + /** + * @return Count of store files + */ + public int getStorefilesCount(); + + /** + * @return The size of the store files, in bytes, uncompressed. + */ + public long getStoreSizeUncompressed(); + + /** + * @return The size of the store files, in bytes. + */ + public long getStorefilesSize(); + + /** + * @return The size of the store file indexes, in bytes. + */ + public long getStorefilesIndexSize(); + + /** + * Returns the total size of all index blocks in the data block indexes, including the root level, + * intermediate levels, and the leaf level for multi-level indexes, or just the root level for + * single-level indexes. + * @return the total size of block indexes in the store + */ + public long getTotalStaticIndexSize(); + + /** + * Returns the total byte size of all Bloom filter bit arrays. For compound Bloom filters even the + * Bloom blocks currently not loaded into the block cache are counted. + * @return the total size of all Bloom filters in the store + */ + public long getTotalStaticBloomSize(); + + // Test-helper methods + + /** + * Compact the most recent N files. Used in testing. + * @param N number of files to compact. Must be less than or equal to current number of files. + * @throws IOException on failure + */ + public void compactRecentForTesting(int N) throws IOException; + + /** + * Used for tests. + * @return cache configuration for this Store. 
+ */ + public CacheConfig getCacheConfig(); + + /** + * @return the parent region hosting this store + */ + public HRegion getHRegion(); +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java index e8b54f9d1ae..d65a9df7693 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java @@ -60,7 +60,7 @@ extends ConstantSizeRegionSplitPolicy { // Get size to check long sizeToCheck = getSizeToCheck(tableRegionsCount); - for (Store store : region.getStores().values()) { + for (HStore store : region.getStores().values()) { // If any of the stores is unable to split (eg they contain reference files) // then don't split if ((!store.canSplit())) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java index 033c9f862bf..6c5fdf6f5d4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java @@ -522,7 +522,7 @@ public class MemStore implements HeapSize { * @param kvs * @return change in memstore size */ - public long upsert(List kvs) { + public long upsert(Iterable kvs) { this.lock.readLock().lock(); try { long size = 0; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java index cb6ed3c5c96..44f1ab447e2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java @@ -437,7 +437,7 @@ class MemStoreFlusher extends HasThread implements FlushRequester { } private boolean isTooManyStoreFiles(HRegion region) { - for (Store hstore: region.stores.values()) { + for (HStore hstore : region.stores.values()) { if (hstore.getStorefilesCount() > this.blockingStoreFilesNumber) { return true; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionSplitPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionSplitPolicy.java index 518124d49aa..9ad2c6d7079 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionSplitPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionSplitPolicy.java @@ -73,11 +73,11 @@ public abstract class RegionSplitPolicy extends Configured { if (explicitSplitPoint != null) { return explicitSplitPoint; } - Map stores = region.getStores(); + Map stores = region.getStores(); byte[] splitPointFromLargestStore = null; long largestStoreSize = 0; - for (Store s : stores.values()) { + for (HStore s : stores.values()) { byte[] splitPoint = s.getSplitPoint(); long storeSize = s.getSize(); if (splitPoint != null && largestStoreSize < storeSize) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java index ab90034d256..caf909d0061 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java +++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java @@ -107,7 +107,7 @@ import com.google.common.collect.Lists; * not be called directly but by an HRegion manager. */ @InterfaceAudience.Private -public class Store extends SchemaConfigured implements HeapSize { +public class Store extends SchemaConfigured implements HStore { static final Log LOG = LogFactory.getLog(Store.class); protected final MemStore memstore; @@ -135,12 +135,6 @@ public class Store extends SchemaConfigured implements HeapSize { final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); private final boolean verifyBulkLoads; - /* The default priority for user-specified compaction requests. - * The user gets top priority unless we have blocking compactions. (Pri <= 0) - */ - public static final int PRIORITY_USER = 1; - public static final int NO_PRIORITY = Integer.MIN_VALUE; - // not private for testing /* package */ScanInfo scanInfo; /* @@ -322,9 +316,7 @@ public class Store extends SchemaConfigured implements HeapSize { return StoreFile.getMaxSequenceIdInList(this.getStorefiles()); } - /** - * @return The maximum memstoreTS in all store files. - */ + @Override public long getMaxMemstoreTS() { return StoreFile.getMaxMemstoreTSInList(this.getStorefiles()); } @@ -349,9 +341,7 @@ public class Store extends SchemaConfigured implements HeapSize { return homedir; } - /** - * @return the data block encoder - */ + @Override public HFileDataBlockEncoder getDataBlockEncoder() { return dataBlockEncoder; } @@ -364,7 +354,7 @@ public class Store extends SchemaConfigured implements HeapSize { this.dataBlockEncoder = blockEncoder; } - FileStatus [] getStoreFiles() throws IOException { + FileStatus[] getStoreFiles() throws IOException { return FSUtils.listStatus(this.fs, this.homedir, null); } @@ -438,13 +428,8 @@ public class Store extends SchemaConfigured implements HeapSize { return results; } - /** - * Adds a value to the memstore - * - * @param kv - * @return memstore size delta - */ - protected long add(final KeyValue kv) { + @Override + public long add(final KeyValue kv) { lock.readLock().lock(); try { return this.memstore.add(kv); @@ -468,14 +453,8 @@ public class Store extends SchemaConfigured implements HeapSize { } } - /** - * Removes a kv from the memstore. The KeyValue is removed only - * if its key & memstoreTS matches the key & memstoreTS value of the - * kv parameter. - * - * @param kv - */ - protected void rollback(final KeyValue kv) { + @Override + public void rollback(final KeyValue kv) { lock.readLock().lock(); try { this.memstore.rollback(kv); @@ -487,15 +466,13 @@ public class Store extends SchemaConfigured implements HeapSize { /** * @return All store files. */ + @Override public List getStorefiles() { return this.storefiles; } - /** - * This throws a WrongRegionException if the HFile does not fit in this - * region, or an InvalidHFileException if the HFile is not valid. - */ - void assertBulkLoadHFileOk(Path srcPath) throws IOException { + @Override + public void assertBulkLoadHFileOk(Path srcPath) throws IOException { HFile.Reader reader = null; try { LOG.info("Validating hfile at " + srcPath + " for inclusion in " @@ -555,12 +532,8 @@ public class Store extends SchemaConfigured implements HeapSize { } } - /** - * This method should only be called from HRegion. It is assumed that the - * ranges of values in the HFile fit within the stores assigned region. 
- * (assertBulkLoadHFileOk checks this) - */ - void bulkLoadHFile(String srcPathStr) throws IOException { + @Override + public void bulkLoadHFile(String srcPathStr) throws IOException { Path srcPath = new Path(srcPathStr); // Copy the file if it's on another filesystem @@ -619,15 +592,8 @@ public class Store extends SchemaConfigured implements HeapSize { fs, region.getTmpDir()); } - /** - * Close all the readers - * - * We don't need to worry about subsequent requests because the HRegion holds - * a write lock that will prevent any more reads or writes. - * - * @throws IOException - */ - ImmutableList close() throws IOException { + @Override + public ImmutableList close() throws IOException { this.lock.writeLock().lock(); try { ImmutableList result = storefiles; @@ -674,8 +640,9 @@ } /** - * Snapshot this stores memstore. Call before running - * {@link #flushCache(long, SortedSet)} so it has some work to do. + * Snapshot this store's memstore. Call before running + * {@link #flushCache(long, SortedSet, TimeRangeTracker, AtomicLong, MonitoredTask)} so it has + * some work to do. */ void snapshot() { this.memstore.snapshot(); @@ -1066,9 +1033,7 @@ return sf; } - /** - * Compact the most recent N files. Used in testing. - */ + @Override public void compactRecentForTesting(int N) throws IOException { List filesToCompact; long maxId; @@ -1117,7 +1082,8 @@ } } - boolean hasReferences() { + @Override + public boolean hasReferences() { return hasReferences(this.storefiles); } @@ -1152,17 +1118,13 @@ return minTs; } - /** getter for CompactionProgress object - * @return CompactionProgress object; can be null - */ + @Override public CompactionProgress getCompactionProgress() { return this.compactor.getProgress(); } - /* - * @return True if we should run a major compaction. 
- */ - boolean isMajorCompaction() throws IOException { + @Override + public boolean isMajorCompaction() throws IOException { for (StoreFile sf : this.storefiles) { if (sf.getReader() == null) { LOG.debug("StoreFile " + sf + " has null Reader"); @@ -1259,7 +1221,7 @@ public class Store extends SchemaConfigured implements HeapSize { } public CompactionRequest requestCompaction() throws IOException { - return requestCompaction(NO_PRIORITY); + return requestCompaction(HStore.NO_PRIORITY); } public CompactionRequest requestCompaction(int priority) throws IOException { @@ -1350,7 +1312,7 @@ public class Store extends SchemaConfigured implements HeapSize { * @throws IOException */ CompactSelection compactSelection(List candidates) throws IOException { - return compactSelection(candidates,NO_PRIORITY); + return compactSelection(candidates,HStore.NO_PRIORITY); } /** @@ -1423,7 +1385,7 @@ public class Store extends SchemaConfigured implements HeapSize { // Force a major compaction if this is a user-requested major compaction, // or if we do not have too many files to compact and this was requested // as a major compaction - boolean majorcompaction = (forcemajor && priority == PRIORITY_USER) || + boolean majorcompaction = (forcemajor && priority == HStore.PRIORITY_USER) || (forcemajor || isMajorCompaction(compactSelection.getFilesToCompact())) && (compactSelection.getFilesToCompact().size() < this.maxFilesToCompact ); @@ -1515,7 +1477,7 @@ public class Store extends SchemaConfigured implements HeapSize { if (compactSelection.getFilesToCompact().size() > this.maxFilesToCompact) { LOG.debug("Warning, compacting more than " + this.maxFilesToCompact + " files, probably because of a user-requested major compaction"); - if(priority != PRIORITY_USER) { + if(priority != HStore.PRIORITY_USER) { LOG.error("Compacting more than max files on a non user-requested compaction"); } } @@ -1663,9 +1625,7 @@ public class Store extends SchemaConfigured implements HeapSize { // Accessors. // (This is the only section that is directly useful!) ////////////////////////////////////////////////////////////////////////////// - /** - * @return the number of files in this store - */ + @Override public int getNumberOfStoreFiles() { return this.storefiles.size(); } @@ -1687,21 +1647,8 @@ public class Store extends SchemaConfigured implements HeapSize { return key.getTimestamp() < oldestTimestamp; } - /** - * Find the key that matches row exactly, or the one that immediately - * precedes it. WARNING: Only use this method on a table where writes occur - * with strictly increasing timestamps. This method assumes this pattern of - * writes in order to make it reasonably performant. Also our search is - * dependent on the axiom that deletes are for cells that are in the container - * that follows whether a memstore snapshot or a storefile, not for the - * current container: i.e. we'll see deletes before we come across cells we - * are to delete. Presumption is that the memstore#kvset is processed before - * memstore#snapshot and so on. - * @param row The row key of the targeted row. - * @return Found keyvalue or null if none found. - * @throws IOException - */ - KeyValue getRowKeyAtOrBefore(final byte[] row) throws IOException { + @Override + public KeyValue getRowKeyAtOrBefore(final byte[] row) throws IOException { // If minVersions is set, we will not ignore expired KVs. // As we're only looking for the latest matches, that should be OK. 
// With minVersions > 0 we guarantee that any KV that has any version @@ -1855,10 +1802,8 @@ public class Store extends SchemaConfigured implements HeapSize { this.lock.readLock().unlock(); } } - /** - * Determines if Store should be split - * @return byte[] if store should be split, null otherwise. - */ + + @Override public byte[] getSplitPoint() { this.lock.readLock().lock(); try { @@ -1928,12 +1873,12 @@ public class Store extends SchemaConfigured implements HeapSize { return null; } - /** @return aggregate size of all HStores used in the last compaction */ + @Override public long getLastCompactSize() { return this.lastCompactSize; } - /** @return aggregate size of HStore */ + @Override public long getSize() { return storeSize; } @@ -1950,11 +1895,7 @@ public class Store extends SchemaConfigured implements HeapSize { // File administration ////////////////////////////////////////////////////////////////////////////// - /** - * Return a scanner for both the memstore and the HStore files. Assumes we - * are not in a compaction. - * @throws IOException - */ + @Override public KeyValueScanner getScanner(Scan scan, final NavigableSet targetCols) throws IOException { lock.readLock().lock(); @@ -1977,24 +1918,18 @@ public class Store extends SchemaConfigured implements HeapSize { return getColumnFamilyName(); } - /** - * @return Count of store files - */ - int getStorefilesCount() { + @Override + public int getStorefilesCount() { return this.storefiles.size(); } - /** - * @return The size of the store files, in bytes, uncompressed. - */ - long getStoreSizeUncompressed() { + @Override + public long getStoreSizeUncompressed() { return this.totalUncompressedBytes; } - /** - * @return The size of the store files, in bytes. - */ - long getStorefilesSize() { + @Override + public long getStorefilesSize() { long size = 0; for (StoreFile s: storefiles) { StoreFile.Reader r = s.getReader(); @@ -2007,10 +1942,8 @@ public class Store extends SchemaConfigured implements HeapSize { return size; } - /** - * @return The size of the store file indexes, in bytes. - */ - long getStorefilesIndexSize() { + @Override + public long getStorefilesIndexSize() { long size = 0; for (StoreFile s: storefiles) { StoreFile.Reader r = s.getReader(); @@ -2023,14 +1956,8 @@ public class Store extends SchemaConfigured implements HeapSize { return size; } - /** - * Returns the total size of all index blocks in the data block indexes, - * including the root level, intermediate levels, and the leaf level for - * multi-level indexes, or just the root level for single-level indexes. - * - * @return the total size of block indexes in the store - */ - long getTotalStaticIndexSize() { + @Override + public long getTotalStaticIndexSize() { long size = 0; for (StoreFile s : storefiles) { size += s.getReader().getUncompressedDataIndexSize(); @@ -2038,14 +1965,8 @@ public class Store extends SchemaConfigured implements HeapSize { return size; } - /** - * Returns the total byte size of all Bloom filter bit arrays. For compound - * Bloom filters even the Bloom blocks currently not loaded into the block - * cache are counted. 
- * - * @return the total size of all Bloom filters in the store - */ - long getTotalStaticBloomSize() { + @Override + public long getTotalStaticBloomSize() { long size = 0; for (StoreFile s : storefiles) { StoreFile.Reader r = s.getReader(); @@ -2054,31 +1975,27 @@ public class Store extends SchemaConfigured implements HeapSize { return size; } - /** - * @return The size of this store's memstore, in bytes - */ - long getMemStoreSize() { + @Override + public long getMemStoreSize() { return this.memstore.heapSize(); } public int getCompactPriority() { - return getCompactPriority(NO_PRIORITY); + return getCompactPriority(HStore.NO_PRIORITY); } - /** - * @return The priority that this store should have in the compaction queue - * @param priority - */ + @Override public int getCompactPriority(int priority) { // If this is a user-requested compaction, leave this at the highest priority - if(priority == PRIORITY_USER) { - return PRIORITY_USER; + if(priority == HStore.PRIORITY_USER) { + return HStore.PRIORITY_USER; } else { return this.blockingStoreFileCount - this.storefiles.size(); } } - boolean throttleCompaction(long compactionSize) { + @Override + public boolean throttleCompaction(long compactionSize) { // see HBASE-5867 for discussion on the default long throttlePoint = conf.getLong( "hbase.regionserver.thread.compaction.throttle", @@ -2086,6 +2003,7 @@ public class Store extends SchemaConfigured implements HeapSize { return compactionSize > throttlePoint; } + @Override public HRegion getHRegion() { return this.region; } @@ -2094,20 +2012,7 @@ public class Store extends SchemaConfigured implements HeapSize { return this.region.regionInfo; } - /** - * Increments the value for the given row/family/qualifier. - * - * This function will always be seen as atomic by other readers - * because it only puts a single KV to memstore. Thus no - * read/write control necessary. - * - * @param row - * @param f - * @param qualifier - * @param newValue the new value to set into memstore - * @return memstore size delta - * @throws IOException - */ + @Override public long updateColumnValue(byte [] row, byte [] f, byte [] qualifier, long newValue) throws IOException { @@ -2127,21 +2032,8 @@ public class Store extends SchemaConfigured implements HeapSize { } } - /** - * Adds or replaces the specified KeyValues. - *
<p>
- * For each KeyValue specified, if a cell with the same row, family, and - * qualifier exists in MemStore, it will be replaced. Otherwise, it will just - * be inserted to MemStore. - *
<p>
- * This operation is atomic on each KeyValue (row/family/qualifier) but not - * necessarily atomic across all of them. - * @param kvs - * @return memstore size delta - * @throws IOException - */ - public long upsert(List kvs) - throws IOException { + @Override + public long upsert(Iterable kvs) throws IOException { this.lock.readLock().lock(); try { // TODO: Make this operation atomic w/ MVCC @@ -2201,18 +2093,12 @@ public class Store extends SchemaConfigured implements HeapSize { } } - /** - * See if there's too much store files in this store - * @return true if number of store files is greater than - * the number defined in minFilesToCompact - */ + @Override public boolean needsCompaction() { return (storefiles.size() - filesCompacting.size()) > minFilesToCompact; } - /** - * Used for tests. Get the cache configuration for this Store. - */ + @Override public CacheConfig getCacheConfig() { return this.cacheConf; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java index 12abb615b0c..1f1545b4d5e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java @@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner; import org.apache.hadoop.hbase.regionserver.CheckedArchivingHFileCleaner; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionServer; +import org.apache.hadoop.hbase.regionserver.HStore; import org.apache.hadoop.hbase.regionserver.Store; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FSUtils; @@ -177,7 +178,7 @@ public class TestZooKeeperTableArchiveClient { loadAndCompact(region); // check that we actually have some store files that were archived - Store store = region.getStore(TEST_FAM); + HStore store = region.getStore(TEST_FAM); Path storeArchiveDir = HFileArchiveTestingUtil.getStoreArchivePath(UTIL.getConfiguration(), region, store); @@ -337,7 +338,7 @@ public class TestZooKeeperTableArchiveClient { loadAndCompact(region); // check that we actually have some store files that were archived - Store store = region.getStore(TEST_FAM); + HStore store = region.getStore(TEST_FAM); Path storeArchiveDir = HFileArchiveTestingUtil.getStoreArchivePath(UTIL.getConfiguration(), region, store); @@ -375,7 +376,7 @@ public class TestZooKeeperTableArchiveClient { * Compact all the store files in a given region. 
*/ private void compactRegion(HRegion region, byte[] family) throws IOException { - Store store = region.getStores().get(TEST_FAM); + HStore store = region.getStores().get(TEST_FAM); store.compactRecentForTesting(store.getStorefiles().size()); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java index 3d1ba3efdf8..9a6a3778ba6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java @@ -67,6 +67,7 @@ import org.apache.hadoop.hbase.io.hfile.BlockCache; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionServer; +import org.apache.hadoop.hbase.regionserver.HStore; import org.apache.hadoop.hbase.regionserver.Store; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; @@ -4593,7 +4594,7 @@ public class TestFromClientSide { String regionName = table.getRegionLocations().firstKey().getEncodedName(); HRegion region = TEST_UTIL.getRSForFirstRegionInTable( tableName).getFromOnlineRegions(regionName); - Store store = region.getStores().values().iterator().next(); + HStore store = region.getStores().values().iterator().next(); CacheConfig cacheConf = store.getCacheConfig(); cacheConf.setCacheDataOnWrite(true); cacheConf.setEvictOnClose(true); @@ -4668,7 +4669,7 @@ public class TestFromClientSide { assertEquals(++expectedBlockMiss, cache.getStats().getMissCount()); } - private void waitForStoreFileCount(Store store, int count, int timeout) + private void waitForStoreFileCount(HStore store, int count, int timeout) throws InterruptedException { long start = System.currentTimeMillis(); while (start + timeout > System.currentTimeMillis() && diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java index be4bc85f41f..3860ed1db9a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java @@ -123,7 +123,7 @@ public class CompactionTool implements Tool { // list of files instead of have Store search its home dir. return new Store(tmpdir, region, hcd, fs, getConf()) { @Override - FileStatus[] getStoreFiles() throws IOException { + public FileStatus[] getStoreFiles() throws IOException { return this.fs.listStatus(getHomedir()); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java index 6cbb2bc3f19..b0453f501cd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java @@ -127,7 +127,7 @@ public class TestAtomicOperation extends HBaseTestCase { assertEquals(value+amount, result); - Store store = region.getStore(fam1); + Store store = (Store) region.getStore(fam1); // ICV removes any extra values floating around in there. 
     assertEquals(1, store.memstore.kvset.size());
     assertTrue(store.memstore.snapshot.isEmpty());
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
index ec53a28a14d..92f62ae1d20 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
@@ -170,10 +170,10 @@ public class TestCompaction extends HBaseTestCase {
       throws Exception {
     Map<Store, HFileDataBlockEncoder> replaceBlockCache =
         new HashMap<Store, HFileDataBlockEncoder>();
-    for (Entry<byte[], Store> pair : r.getStores().entrySet()) {
-      Store store = pair.getValue();
+    for (Entry<byte[], HStore> pair : r.getStores().entrySet()) {
+      Store store = (Store) pair.getValue();
       HFileDataBlockEncoder blockEncoder = store.getDataBlockEncoder();
-      replaceBlockCache.put(pair.getValue(), blockEncoder);
+      replaceBlockCache.put(store, blockEncoder);
       final DataBlockEncoding inCache = DataBlockEncoding.PREFIX;
       final DataBlockEncoding onDisk = inCacheOnly ? DataBlockEncoding.NONE :
           inCache;
@@ -206,7 +206,7 @@ public class TestCompaction extends HBaseTestCase {
     assertEquals(compactionThreshold, result.size());
 
     // see if CompactionProgress is in place but null
-    for (Store store: this.r.stores.values()) {
+    for (HStore store : this.r.stores.values()) {
       assertNull(store.getCompactionProgress());
     }
 
@@ -215,7 +215,7 @@ public class TestCompaction extends HBaseTestCase {
 
     // see if CompactionProgress has done its thing on at least one store
     int storeCount = 0;
-    for (Store store: this.r.stores.values()) {
+    for (HStore store : this.r.stores.values()) {
       CompactionProgress progress = store.getCompactionProgress();
       if( progress != null ) {
         ++storeCount;
@@ -281,7 +281,8 @@ public class TestCompaction extends HBaseTestCase {
     // Multiple versions allowed for an entry, so the delete isn't enough
     // Lower TTL and expire to ensure that all our entries have been wiped
     final int ttl = 1000;
-    for (Store store: this.r.stores.values()) {
+    for (HStore hstore : this.r.stores.values()) {
+      Store store = ((Store) hstore);
       Store.ScanInfo old = store.scanInfo;
       Store.ScanInfo si = new Store.ScanInfo(old.getFamily(),
           old.getMinVersions(), old.getMaxVersions(), ttl,
@@ -302,7 +303,7 @@ public class TestCompaction extends HBaseTestCase {
     conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, delay);
     conf.setFloat("hbase.hregion.majorcompaction.jitter", jitterPct);
 
-    Store s = r.getStore(COLUMN_FAMILY);
+    Store s = ((Store) r.getStore(COLUMN_FAMILY));
     try {
       createStoreFile(r);
       createStoreFile(r);
@@ -435,7 +436,7 @@ public class TestCompaction extends HBaseTestCase {
     assertEquals(compactionThreshold, result.size());
 
     // do a compaction
-    Store store2 = this.r.stores.get(fam2);
+    HStore store2 = this.r.stores.get(fam2);
     int numFiles1 = store2.getStorefiles().size();
     assertTrue("Was expecting to see 4 store files", numFiles1 > compactionThreshold); // > 3
     store2.compactRecentForTesting(compactionThreshold);   // = 3
@@ -512,7 +513,7 @@ public class TestCompaction extends HBaseTestCase {
     spyR.compactStores();
 
     // ensure that the compaction stopped, all old files are intact,
-    Store s = r.stores.get(COLUMN_FAMILY);
+    HStore s = r.stores.get(COLUMN_FAMILY);
     assertEquals(compactionThreshold, s.getStorefilesCount());
     assertTrue(s.getStorefilesSize() > 15*1000);
     // and no new store files persisted past compactStores()
@@ -536,7 +537,8 @@ public class TestCompaction extends HBaseTestCase {
     // Multiple versions allowed for an entry, so the delete isn't enough
     // Lower TTL and expire to ensure that all our entries have been wiped
     final int ttl = 1000;
-    for (Store store: this.r.stores.values()) {
+    for (HStore hstore: this.r.stores.values()) {
+      Store store = (Store)hstore;
       Store.ScanInfo old = store.scanInfo;
       Store.ScanInfo si = new Store.ScanInfo(old.getFamily(),
           old.getMinVersions(), old.getMaxVersions(), ttl,
@@ -583,7 +585,7 @@ public class TestCompaction extends HBaseTestCase {
     for (int i = 0; i < nfiles; i++) {
       createStoreFile(r);
     }
-    Store store = r.getStore(COLUMN_FAMILY);
+    Store store = (Store) r.getStore(COLUMN_FAMILY);
 
     List<StoreFile> storeFiles = store.getStorefiles();
     long maxId = StoreFile.getMaxSequenceIdInList(storeFiles);
@@ -621,14 +623,14 @@ public class TestCompaction extends HBaseTestCase {
    * Test for HBASE-5920 - Test user requested major compactions always occurring
    */
   public void testNonUserMajorCompactionRequest() throws Exception {
-    Store store = r.getStore(COLUMN_FAMILY);
+    HStore store = r.getStore(COLUMN_FAMILY);
     createStoreFile(r);
     for (int i = 0; i < MAX_FILES_TO_COMPACT + 1; i++) {
       createStoreFile(r);
     }
     store.triggerMajorCompaction();
 
-    CompactionRequest request = store.requestCompaction(Store.NO_PRIORITY);
+    CompactionRequest request = store.requestCompaction(HStore.NO_PRIORITY);
     assertNotNull("Expected to receive a compaction request", request);
     assertEquals(
       "System-requested major compaction should not occur if there are too many store files",
@@ -640,13 +642,13 @@ public class TestCompaction extends HBaseTestCase {
    * Test for HBASE-5920
    */
   public void testUserMajorCompactionRequest() throws IOException{
-    Store store = r.getStore(COLUMN_FAMILY);
+    HStore store = r.getStore(COLUMN_FAMILY);
     createStoreFile(r);
     for (int i = 0; i < MAX_FILES_TO_COMPACT + 1; i++) {
       createStoreFile(r);
     }
     store.triggerMajorCompaction();
-    CompactionRequest request = store.requestCompaction(Store.PRIORITY_USER);
+    CompactionRequest request = store.requestCompaction(HStore.PRIORITY_USER);
     assertNotNull("Expected to receive a compaction request", request);
     assertEquals(
       "User-requested major compaction should always occur, even if there are too many store files",
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
index f881da966bb..85f8e4f3f81 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
@@ -236,7 +236,7 @@ public class TestHRegion extends HBaseTestCase {
     MonitoredTask status = TaskMonitor.get().createStatus(method);
     Map<byte[], Long> maxSeqIdInStores = new TreeMap<byte[], Long>(
         Bytes.BYTES_COMPARATOR);
-    for (Store store : region.getStores().values()) {
+    for (HStore store : region.getStores().values()) {
       maxSeqIdInStores.put(store.getColumnFamilyName().getBytes(),
           minSeqId - 1);
     }
@@ -288,7 +288,7 @@ public class TestHRegion extends HBaseTestCase {
     MonitoredTask status = TaskMonitor.get().createStatus(method);
     Map<byte[], Long> maxSeqIdInStores = new TreeMap<byte[], Long>(
         Bytes.BYTES_COMPARATOR);
-    for (Store store : region.getStores().values()) {
+    for (HStore store : region.getStores().values()) {
       maxSeqIdInStores.put(store.getColumnFamilyName().getBytes(),
           recoverSeqId - 1);
     }
@@ -336,7 +336,7 @@ public class TestHRegion extends HBaseTestCase {
 
     Map<byte[], Long> maxSeqIdInStores = new TreeMap<byte[], Long>(
         Bytes.BYTES_COMPARATOR);
-    for (Store store : region.getStores().values()) {
+    for (HStore store : region.getStores().values()) {
       maxSeqIdInStores.put(store.getColumnFamilyName().getBytes(), minSeqId);
     }
     long seqId = region.replayRecoveredEditsIfAny(regiondir,
@@ -864,7 +864,7 @@ public class TestHRegion extends HBaseTestCase {
     put.add(kv);
 
     //checkAndPut with wrong value
-    Store store = region.getStore(fam1);
+    Store store = (Store) region.getStore(fam1);
     store.memstore.kvset.size();
 
     boolean res = region.checkAndMutate(row1, fam1, qf1, CompareOp.EQUAL,
@@ -1379,10 +1379,10 @@ public class TestHRegion extends HBaseTestCase {
     // extract the key values out the memstore:
     // This is kinda hacky, but better than nothing...
     long now = System.currentTimeMillis();
-    KeyValue firstKv = region.getStore(fam1).memstore.kvset.first();
+    KeyValue firstKv = ((Store) region.getStore(fam1)).memstore.kvset.first();
     assertTrue(firstKv.getTimestamp() <= now);
     now = firstKv.getTimestamp();
-    for (KeyValue kv: region.getStore(fam1).memstore.kvset) {
+    for (KeyValue kv : ((Store) region.getStore(fam1)).memstore.kvset) {
       assertTrue(kv.getTimestamp() <= now);
       now = kv.getTimestamp();
     }
@@ -2320,7 +2320,7 @@ public class TestHRegion extends HBaseTestCase {
 
     assertEquals(value+amount, result);
 
-    Store store = region.getStore(fam1);
+    Store store = (Store) region.getStore(fam1);
     // ICV removes any extra values floating around in there.
     assertEquals(1, store.memstore.kvset.size());
     assertTrue(store.memstore.snapshot.isEmpty());
@@ -2346,7 +2346,7 @@ public class TestHRegion extends HBaseTestCase {
     region.put(put);
 
     // get the store in question:
-    Store s = region.getStore(fam1);
+    Store s = (Store) region.getStore(fam1);
     s.snapshot(); //bam
 
     // now increment:
@@ -2490,7 +2490,7 @@ public class TestHRegion extends HBaseTestCase {
     // flush to disk.
     region.flushcache();
 
-    Store store = region.getStore(fam1);
+    Store store = (Store) region.getStore(fam1);
     assertEquals(0, store.memstore.kvset.size());
 
     long r = region.incrementColumnValue(row, fam1, qual1, amount, true);
@@ -2516,7 +2516,7 @@ public class TestHRegion extends HBaseTestCase {
     region.put(put);
     region.flushcache();
 
-    Store store = region.getStore(fam1);
+    Store store = (Store) region.getStore(fam1);
     assertEquals(0, store.memstore.kvset.size());
 
     long r = region.incrementColumnValue(row, fam1, qual3, amount, true);
@@ -2562,7 +2562,7 @@ public class TestHRegion extends HBaseTestCase {
 
     assertEquals(value+amount, result);
 
-    Store store = region.getStore(fam1);
+    Store store = (Store) region.getStore(fam1);
     // ICV should update the existing Put with the same timestamp
     assertEquals(1, store.memstore.kvset.size());
     assertTrue(store.memstore.snapshot.isEmpty());
@@ -2578,7 +2578,7 @@ public class TestHRegion extends HBaseTestCase {
 
     assertEquals(value+amount, result);
 
-    store = region.getStore(fam1);
+    store = (Store) region.getStore(fam1);
     // ICV should update the existing Put with the same timestamp
     assertEquals(2, store.memstore.kvset.size());
     assertTrue(store.memstore.snapshot.isEmpty());
@@ -3397,7 +3397,7 @@ public class TestHRegion extends HBaseTestCase {
       region.flushcache();
     }
     //before compaction
-    Store store = region.getStore(fam1);
+    Store store = (Store) region.getStore(fam1);
     List<StoreFile> storeFiles = store.getStorefiles();
     for (StoreFile storefile : storeFiles) {
       StoreFile.Reader reader = storefile.getReader();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
index a383a4842fb..d845231223e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
@@ -240,7 +240,7 @@ public class TestRegionServerMetrics {
     rs.doMetrics();
     for (HRegion r : TEST_UTIL.getMiniHBaseCluster().getRegions(
         Bytes.toBytes(TABLE_NAME))) {
-      for (Map.Entry<byte[], Store> storeEntry : r.getStores().entrySet()) {
+      for (Map.Entry<byte[], HStore> storeEntry : r.getStores().entrySet()) {
         LOG.info("For region " + r.getRegionNameAsString() + ", CF " +
             Bytes.toStringBinary(storeEntry.getKey()) + " found store files " +
             ": " + storeEntry.getValue().getStorefiles());
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java
index b7ab9762962..bc68a864753 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java
@@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.regionserver.FlushRequester;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -181,7 +182,7 @@ public class TestWALReplay {
     // flush region and make major compaction
     destServer.getOnlineRegion(destRegion.getRegionName()).flushcache();
     // wait to complete major compaction
-    for (Store store : destServer.getOnlineRegion(destRegion.getRegionName())
+    for (HStore store : destServer.getOnlineRegion(destRegion.getRegionName())
         .getStores().values()) {
       store.triggerMajorCompaction();
     }
@@ -421,7 +422,7 @@ public class TestWALReplay {
     final AtomicInteger countOfRestoredEdits = new AtomicInteger(0);
     HRegion region3 = new HRegion(basedir, wal3, newFS, newConf, hri, htd, null) {
       @Override
-      protected boolean restoreEdit(Store s, KeyValue kv) {
+      protected boolean restoreEdit(HStore s, KeyValue kv) {
         boolean b = super.restoreEdit(s, kv);
         countOfRestoredEdits.incrementAndGet();
         return b;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileArchiveTestingUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileArchiveTestingUtil.java
index 7609e0ee540..654e5583612 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileArchiveTestingUtil.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileArchiveTestingUtil.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.Store;
 
 /**
@@ -221,7 +222,7 @@ public class HFileArchiveTestingUtil {
    * @param store store that is archiving files
    * @return {@link Path} to the store archive directory for the given region
    */
-  public static Path getStoreArchivePath(Configuration conf, HRegion region, Store store) {
+  public static Path getStoreArchivePath(Configuration conf, HRegion region, HStore store) {
     return HFileArchiveUtil.getStoreArchivePath(conf, region, store.getFamily().getName());
   }
 
@@ -233,7 +234,7 @@ public class HFileArchiveTestingUtil {
     HRegion region = servingRegions.get(0);
 
     // check that we actually have some store files that were archived
-    Store store = region.getStore(storeName);
+    HStore store = region.getStore(storeName);
     return HFileArchiveTestingUtil.getStoreArchivePath(util.getConfiguration(), region, store);
   }
 }
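
Reviewer's sketch (not part of the patch; class and method names below are hypothetical): the test hunks above all follow one convention this refactor establishes. General callers hold the new HStore interface returned by HRegion.getStore()/getStores(), while tests in the org.apache.hadoop.hbase.regionserver package downcast to the concrete Store when they need package-private internals such as the memstore. A minimal illustration, using only calls that appear in the patch:

package org.apache.hadoop.hbase.regionserver;

// Illustrative only. Assumes the HStore interface / Store implementation
// split introduced by this patch; HStoreUsageExample is a made-up name.
public class HStoreUsageExample {

  /** Interface-typed access, as in the updated production call sites. */
  static void triggerMajorCompactions(HRegion region) {
    for (HStore store : region.getStores().values()) {
      store.triggerMajorCompaction();
    }
  }

  /** Implementation access, as the updated tests do for memstore state. */
  static int memstoreKvCount(HRegion region, byte[] family) {
    Store store = (Store) region.getStore(family); // test-only downcast
    return store.memstore.kvset.size();            // package-private state
  }
}

Confining the casts to test code means production code compiles against HStore alone, which is the point of extracting the interface.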