HBASE-532 Odd interaction between HRegion.get, HRegion.deleteAll and compactions

Found an issue in the HBASE-532 commit: the MemcacheScanner had a flipped
isWildcardScanner test. Fixing that test then returned the wrong columns,
because the scanner filtered on okCols rather than on the literal columns
passed in.


git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@648912 13f79535-47bb-0310-9956-ffa450edef68
Author: Michael Stack
Date:   2008-04-17 01:01:50 +00:00
commit: ef5bb6f316
parent: 373761483f
4 changed files with 22 additions and 19 deletions
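
For readers skimming the diff below: a non-wildcard scanner should remember the
literal columns the caller asked for and filter on those, instead of consulting
the okCols matcher map. The following standalone sketch is not part of this
commit; the class and names are illustrative only, but it shows the shape of
the distinction the fix restores:

  import java.util.HashSet;
  import java.util.Set;

  // Illustrative only: cache the literal target columns when the scanner is
  // NOT a wildcard scanner, then filter results against that cached set.
  class ColumnFilterSketch {
    private final boolean wildcardScanner;
    private final Set<String> columns;   // null means no explicit-column filter

    ColumnFilterSketch(String[] targetCols, boolean wildcardScanner) {
      this.wildcardScanner = wildcardScanner;
      if (!wildcardScanner) {
        // Explicit columns: remember exactly what the caller asked for.
        Set<String> cols = new HashSet<String>();
        for (String col : targetCols) {
          cols.add(col);
        }
        this.columns = cols;
      } else {
        // Wildcard/regex scan: matching happens elsewhere; keep no literal set.
        this.columns = null;
      }
    }

    boolean wanted(String column) {
      return this.wildcardScanner || this.columns.contains(column);
    }

    public static void main(String[] args) {
      ColumnFilterSketch f =
        new ColumnFilterSketch(new String[] {"info:a", "info:b"}, false);
      System.out.println(f.wanted("info:a"));   // true
      System.out.println(f.wanted("info:c"));   // false
    }
  }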

HAbstractScanner.java

@@ -131,10 +131,6 @@ public abstract class HAbstractScanner implements InternalScanner {
         this.wildcardMatch = true;
       }
       matchers.add(matcher);
-      // TODO: Does this multipleMatchers matter any more now that scanners
-      // are done at the store level? It might have mattered when scanners
-      // could have been done at the region level when memcache was at the
-      // region level rather than down here at store level.
       if (matchers.size() > 1) {
         this.multipleMatchers = true;
       }

HRegion.java

@@ -939,7 +939,6 @@ public class HRegion implements HConstants {
       return -1;
     }
     long startTime = System.currentTimeMillis();
-
     if(LOG.isDebugEnabled()) {
       LOG.debug("Started memcache flush for region " +
         this.regionInfo.getRegionName() + ". Size " +
@@ -948,13 +947,11 @@ public class HRegion implements HConstants {
     // We reset the aggregate memcache size here so that subsequent updates
     // will add to the unflushed size
     this.memcacheSize.set(0L);
     this.flushRequested = false;
     // Record latest flush time
     this.lastFlushTime = System.currentTimeMillis();
     for (HStore hstore: stores.values()) {
       hstore.snapshotMemcache();
     }

HStore.java

@@ -590,8 +590,9 @@ public class HStore implements HConstants {
   //////////////////////////////////////////////////////////////////////////////

   /**
-   * Prior to doing a cache flush, we need to snapshot the memcache. Locking is
-   * handled by the memcache.
+   * Prior to doing a cache flush, we need to snapshot the memcache.
+   * TODO: This method is ugly. Why let client of HStore run snapshots. How
+   * do we know they'll be cleaned up?
    */
   void snapshotMemcache() {
     this.memcache.snapshot();

Memcache.java

@@ -23,8 +23,10 @@ package org.apache.hadoop.hbase.regionserver;
 import java.io.IOException;
 import java.rmi.UnexpectedException;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
@@ -75,7 +77,7 @@ class Memcache {
   }

   /**
-   * Creates a snapshot of the current Memcache
+   * Creates a snapshot of the current Memcache or returns existing snapshot.
    * Must be followed by a call to {@link #clearSnapshot(SortedMap)}
    * @return Snapshot. Never null. May have no entries.
    */
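
A standalone sketch of the contract this javadoc describes, using nothing
beyond what the comment states: snapshot() never returns null, may hand back
an extant snapshot, and must be paired with clearSnapshot(). SimpleMemcache
and the String keys below are illustrative, not the HBase classes in this tree:

  import java.util.SortedMap;
  import java.util.TreeMap;

  // Illustrative only: the snapshot()/clearSnapshot() pairing a flusher follows.
  class SimpleMemcache {
    private SortedMap<String, byte[]> memcache = new TreeMap<String, byte[]>();
    private SortedMap<String, byte[]> snapshot = new TreeMap<String, byte[]>();

    synchronized SortedMap<String, byte[]> snapshot() {
      if (!this.snapshot.isEmpty()) {
        return this.snapshot;                 // extant snapshot not yet cleared
      }
      this.snapshot = this.memcache;          // hand current edits to the flusher
      this.memcache = new TreeMap<String, byte[]>();
      return this.snapshot;                   // never null, may be empty
    }

    synchronized void clearSnapshot(SortedMap<String, byte[]> ss) {
      if (ss == this.snapshot) {
        this.snapshot = new TreeMap<String, byte[]>();  // flusher is done with it
      }
    }

    public static void main(String[] args) {
      SimpleMemcache mc = new SimpleMemcache();
      SortedMap<String, byte[]> ss = mc.snapshot();
      try {
        // ... a real flusher would persist ss to a store file here ...
      } finally {
        mc.clearSnapshot(ss);                 // always release the snapshot
      }
    }
  }
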
@@ -84,8 +86,9 @@ class Memcache {
     try {
       // If snapshot has entries, then flusher failed or didn't call cleanup.
       if (this.snapshot.size() > 0) {
-        LOG.warn("Returning extant snapshot. Is there another ongoing " +
-          "flush or did last attempt fail?");
+        LOG.debug("Returning existing snapshot. Either the snapshot was run " +
+          "by the region -- normal operation but to be fixed -- or there is " +
+          "another ongoing flush or did we fail last attempt?");
         return this.snapshot;
       }
       // We used to synchronize on the memcache here but we're inside a
@@ -236,10 +239,10 @@ class Memcache {
   /**
    * Return all the available columns for the given key. The key indicates a
    * row and timestamp, but not a column name.
-   *
-   * The returned object should map column names to byte arrays (byte[]).
    * @param key
-   * @param results
+   * @param columns Pass null for all columns else the wanted subset.
+   * @param deletes Map to accumulate deletes found.
+   * @param results Where to stick row results found.
    */
   void getFull(HStoreKey key, Set<Text> columns, Map<Text, Long> deletes,
     Map<Text, Cell> results) {
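
A caller-side sketch of the parameters as now documented. Here memcache and
key are assumed to already be in scope inside this source tree, and imports
are omitted; HStoreKey, Text and Cell are the surrounding HBase types:

  // Sketch only: drive getFull() per the javadoc above.
  Map<Text, Long> deletes = new HashMap<Text, Long>();   // deletes found en route
  Map<Text, Cell> results = new HashMap<Text, Cell>();   // column -> cell found

  // Pass null to ask for every column stored under this row/timestamp...
  memcache.getFull(key, null, deletes, results);

  // ...or pass an explicit subset of wanted columns.
  Set<Text> wanted = new HashSet<Text>();
  wanted.add(new Text("info:regioninfo"));
  memcache.getFull(key, wanted, deletes, results);
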
@@ -565,7 +568,7 @@ class Memcache {
   private class MemcacheScanner extends HAbstractScanner {
     private Text currentRow;
-    private final Set<Text> columns;
+    private Set<Text> columns = null;

     MemcacheScanner(final long timestamp, final Text targetCols[],
       final Text firstRow)
@@ -577,7 +580,13 @@ class Memcache {
       // If we're being asked to scan explicit columns rather than all in
       // a family or columns that match regexes, cache the sorted array of
       // columns.
-      this.columns = this.isWildcardScanner()? this.okCols.keySet(): null;
+      this.columns = null;
+      if (!isWildcardScanner()) {
+        this.columns = new HashSet<Text>();
+        for (int i = 0; i < targetCols.length; i++) {
+          this.columns.add(targetCols[i]);
+        }
+      }
     }

     @Override