HBASE-826 delete table followed by recreation results in honked table

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@690101 13f79535-47bb-0310-9956-ffa450edef68
Michael Stack 2008-08-29 05:09:21 +00:00
parent 7632f2b763
commit d1adcf1629
6 changed files with 135 additions and 128 deletions

View File

@@ -42,6 +42,7 @@ Release 0.18.0 - Unreleased
    HBASE-768  This message 'java.io.IOException: Install 0.1.x of hbase and run
               its migration first' is useless (Jean-Daniel Cryans via Jim
               Kellerman)
+   HBASE-826  Delete table followed by recreation results in honked table
  IMPROVEMENTS
    HBASE-801  When a table haven't disable, shell could response in a "user

View File

@@ -27,10 +27,8 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.TableNotDisabledException;
-import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
+import org.apache.hadoop.hbase.regionserver.HRegion;
 /**
  * Instantiated to delete a table. Table must be offline.

View File

@@ -1275,9 +1275,10 @@ public class HRegion implements HConstants {
   /*
    * Get <code>versions</code> keys matching the origin key's
    * row/column/timestamp and those of an older vintage.
+   * Public so available when debugging.
    * @param origin Where to start searching.
-   * @param versions How many versions to return. Pass
-   * {@link HConstants.ALL_VERSIONS} to retrieve all.
+   * @param versions How many versions to return. Pass HConstants.ALL_VERSIONS
+   * to retrieve all.
    * @return Ordered list of <code>versions</code> keys going from newest back.
    * @throws IOException
    */
@@ -1500,7 +1501,7 @@ public class HRegion implements HConstants {
     checkReadOnly();
     Integer lid = getLock(lockid,row);
     try {
-      // Delete ALL verisons rather than MAX_VERSIONS. If we just did
+      // Delete ALL versions rather than MAX_VERSIONS. If we just did
       // MAX_VERSIONS, then if 2* MAX_VERSION cells, subsequent gets would
       // get old stuff.
       deleteMultiple(row, column, ts, ALL_VERSIONS);
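A standalone sketch (plain Java, illustrative names, not HBase code) of the situation the comment above describes: when a cell carries more versions than MAX_VERSIONS, tombstoning only the newest MAX_VERSIONS timestamps leaves the older ones behind for a later get to return.

```java
import java.util.Collections;
import java.util.TreeMap;

/** Standalone sketch, not HBase code: why the delete above covers every version. */
public class DeleteVersionsSketch {
  public static void main(String[] args) {
    final int maxVersions = 3;
    // Six timestamped versions of one cell, ordered newest first.
    TreeMap<Long, String> cell = new TreeMap<Long, String>(Collections.<Long>reverseOrder());
    for (long ts = 1; ts <= 6; ts++) {
      cell.put(ts, "value@" + ts);
    }
    // Tombstone only the newest MAX_VERSIONS timestamps...
    TreeMap<Long, String> remaining = new TreeMap<Long, String>(cell);
    int removed = 0;
    for (Long ts : cell.keySet()) {
      if (removed++ == maxVersions) {
        break;
      }
      remaining.remove(ts);
    }
    // ...and the older versions survive, so a subsequent get sees stale data.
    System.out.println("left behind by a MAX_VERSIONS delete: " + remaining);
    // Deleting ALL_VERSIONS (every timestamp) leaves nothing to resurface.
  }
}
```

Running it prints the three oldest versions still present after the partial delete, which is exactly the "old stuff" the comment warns a subsequent get would return.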
@@ -1657,8 +1658,8 @@ public class HRegion implements HConstants {
     this.updatesLock.readLock().lock();
     try {
       if (writeToWAL) {
-        this.log.append(regionInfo.getRegionName(), regionInfo.getTableDesc()
-            .getName(), updatesByColumn);
+        this.log.append(regionInfo.getRegionName(),
+          regionInfo.getTableDesc().getName(), updatesByColumn);
       }
       long size = 0;
       for (Map.Entry<HStoreKey, byte[]> e: updatesByColumn.entrySet()) {
@@ -1716,12 +1717,14 @@ public class HRegion implements HConstants {
       this.conf, reporter);
   }
-  /*
-   * @param column
+  /**
+   * Return HStore instance.
+   * Use with caution. Exposed for use of fixup utilities.
+   * @param column Name of column family hosted by this region.
    * @return Store that goes with the family on passed <code>column</code>.
    * TODO: Make this lookup faster.
    */
-  protected HStore getStore(final byte [] column) {
+  public HStore getStore(final byte [] column) {
     return this.stores.get(HStoreKey.getFamilyMapKey(column));
   }
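Since getStore() is now public "for use of fixup utilities", a hypothetical sketch of the kind of caller this enables; the helper name is made up, it assumes an already-opened HRegion, and it leans only on getStore() and getReaders(), the two accessors this change widens.

```java
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.MapFile;

public class StoreDumpSketch {
  /** Hypothetical fixup-style helper; assumes an already-opened HRegion. */
  static void dumpStoreFileCount(final HRegion r, final byte [] column) {
    // getStore() and getReaders() are the accessors widened in this commit.
    HStore store = r.getStore(column);
    MapFile.Reader [] readers = store.getReaders();  // ordered oldest to newest
    System.out.println(readers.length + " mapfile(s) for family " +
      Bytes.toString(column));
  }
}
```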
@@ -2226,8 +2229,7 @@ public class HRegion implements HConstants {
   public static void removeRegionFromMETA(final HRegionInterface srvr,
     final byte [] metaRegionName, final byte [] regionName)
   throws IOException {
-    srvr.deleteAll(metaRegionName, regionName, HConstants.LATEST_TIMESTAMP,
-      (long)-1L);
+    srvr.deleteAll(metaRegionName, regionName, HConstants.LATEST_TIMESTAMP, -1L);
   }
   /**
@@ -2248,7 +2250,7 @@ public class HRegion implements HConstants {
     b.delete(COL_STARTCODE);
     // If carrying splits, they'll be in place when we show up on new
     // server.
-    srvr.batchUpdate(metaRegionName, b, (long)-1L);
+    srvr.batchUpdate(metaRegionName, b, -1L);
   }
   /**

View File

@@ -739,6 +739,7 @@ public class HStore implements HConstants {
       if (this.storefiles.size() <= 0) {
         return null;
       }
+      // filesToCompact are sorted oldest to newest.
       filesToCompact = new ArrayList<HStoreFile>(this.storefiles.values());
       // The max-sequenceID in any of the to-be-compacted TreeMaps is the
@@ -805,12 +806,12 @@ public class HStore implements HConstants {
        * the caching associated with the currently-loaded ones. Our iteration-
        * based access pattern is practically designed to ruin the cache.
        */
-      List<MapFile.Reader> readers = new ArrayList<MapFile.Reader>();
+      List<MapFile.Reader> rdrs = new ArrayList<MapFile.Reader>();
       for (HStoreFile file: filesToCompact) {
         try {
           HStoreFile.BloomFilterMapFile.Reader reader =
             file.getReader(fs, false, false);
-          readers.add(reader);
+          rdrs.add(reader);
           // Compute the size of the new bloomfilter if needed
           if (this.family.isBloomfilter()) {
@@ -821,28 +822,24 @@ public class HStore implements HConstants {
           // exception message so output a message here where we know the
           // culprit.
           LOG.warn("Failed with " + e.toString() + ": " + file.toString());
-          closeCompactionReaders(readers);
+          closeCompactionReaders(rdrs);
           throw e;
         }
       }
-      // Storefiles are keyed by sequence id. The oldest file comes first.
-      // We need to return out of here a List that has the newest file first.
-      Collections.reverse(readers);
       // Step through them, writing to the brand-new MapFile
       HStoreFile compactedOutputFile = new HStoreFile(conf, fs,
         this.compactionDir, info.getEncodedName(), family.getName(),
         -1L, null);
       if (LOG.isDebugEnabled()) {
-        LOG.debug("started compaction of " + readers.size() + " files into " +
+        LOG.debug("started compaction of " + rdrs.size() + " files into " +
           FSUtils.getPath(compactedOutputFile.getMapFilePath()));
       }
       MapFile.Writer writer = compactedOutputFile.getWriter(this.fs,
         this.compression, this.family.isBloomfilter(), nrows);
       writer.setIndexInterval(family.getMapFileIndexInterval());
       try {
-        compactHStoreFiles(writer, readers);
+        compact(writer, rdrs);
       } finally {
         writer.close();
       }
@@ -864,14 +861,20 @@ public class HStore implements HConstants {
    * Compact a list of MapFile.Readers into MapFile.Writer.
    *
    * We work by iterating through the readers in parallel. We always increment
-   * the lowest-ranked one.
-   * Updates to a single row/column will appear ranked by timestamp. This allows
-   * us to throw out deleted values or obsolete versions.
+   * the lowest-ranked one. Updates to a single row/column will appear ranked
+   * by timestamp.
+   * @param compactedOut Where to write compaction.
+   * @param pReaders List of readers sorted oldest to newest.
+   * @throws IOException
    */
-  private void compactHStoreFiles(final MapFile.Writer compactedOut,
-      final List<MapFile.Reader> readers)
+  private void compact(final MapFile.Writer compactedOut,
+      final List<MapFile.Reader> pReaders)
   throws IOException {
-    MapFile.Reader[] rdrs = readers.toArray(new MapFile.Reader[readers.size()]);
+    // Reverse order so we newest is first.
+    List<MapFile.Reader> copy = new ArrayList<MapFile.Reader>(pReaders.size());
+    Collections.copy(copy, pReaders);
+    Collections.reverse(copy);
+    MapFile.Reader[] rdrs = pReaders.toArray(new MapFile.Reader[copy.size()]);
     try {
       HStoreKey[] keys = new HStoreKey[rdrs.length];
       ImmutableBytesWritable[] vals = new ImmutableBytesWritable[rdrs.length];
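A standalone sketch (toy types, not the HBase classes) of the merge loop the javadoc above describes: every reader is already sorted, the loop always emits the reader whose current key ranks lowest and advances only that reader, and because a key ranks newer timestamps first, the versions of one row come out newest first, which is what lets the real code drop deleted values and obsolete versions as it streams.

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class CompactionMergeSketch {
  /** Toy key: row plus timestamp; newer timestamps rank first within a row. */
  static final class Key implements Comparable<Key> {
    final String row;
    final long ts;
    Key(final String row, final long ts) { this.row = row; this.ts = ts; }
    public int compareTo(final Key o) {
      int c = this.row.compareTo(o.row);
      return c != 0 ? c : Long.valueOf(o.ts).compareTo(Long.valueOf(this.ts)); // ts descending
    }
    public String toString() { return this.row + "@" + this.ts; }
  }

  public static void main(String[] args) {
    // Two already-sorted "store files"; index 0 plays the role of the newest
    // file, the order the real compact() arranges before merging.
    List<List<Key>> readers = new ArrayList<List<Key>>();
    readers.add(Arrays.asList(new Key("row1", 9), new Key("row2", 8))); // newer file
    readers.add(Arrays.asList(new Key("row1", 5), new Key("row3", 4))); // older file
    int [] pos = new int[readers.size()];
    int exhausted = 0;
    while (exhausted < readers.size()) {
      // Find the reader whose current key ranks lowest.
      int smallest = -1;
      for (int i = 0; i < readers.size(); i++) {
        if (pos[i] >= readers.get(i).size()) {
          continue;                                   // this reader is finished
        }
        if (smallest < 0 || readers.get(i).get(pos[i]).compareTo(
            readers.get(smallest).get(pos[smallest])) < 0) {
          smallest = i;
        }
      }
      // Emit it, then advance only that reader.
      System.out.println(readers.get(smallest).get(pos[smallest]));
      if (++pos[smallest] == readers.get(smallest).size()) {
        exhausted++;
      }
    }
    // Prints: row1@9, row1@5, row2@8, row3@4 -- versions of row1 newest first.
  }
}
```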
@@ -897,7 +900,6 @@ public class HStore implements HConstants {
       int timesSeen = 0;
       byte [] lastRow = null;
       byte [] lastColumn = null;
       while (numDone < done.length) {
         // Find the reader with the smallest key. If two files have same key
         // but different values -- i.e. one is delete and other is non-delete
@@ -917,8 +919,6 @@ public class HStore implements HConstants {
             }
           }
         }
-        // Reflect the current key/val in the output
         HStoreKey sk = keys[smallestKey];
         if (Bytes.equals(lastRow, sk.getRow())
             && Bytes.equals(lastColumn, sk.getColumn())) {
@@ -1172,7 +1172,7 @@ public class HStore implements HConstants {
   /**
    * @return Array of readers ordered oldest to newest.
    */
-  MapFile.Reader [] getReaders() {
+  public MapFile.Reader [] getReaders() {
     return this.readers.values().
       toArray(new MapFile.Reader[this.readers.size()]);
   }
@@ -1306,9 +1306,8 @@ public class HStore implements HConstants {
   }
   /**
-   * Get <code>versions</code> keys matching the origin key's
+   * Get <code>versions</code> of keys matching the origin key's
    * row/column/timestamp and those of an older vintage.
-   * Default access so can be accessed out of {@link HRegionServer}.
    * @param origin Where to start searching.
    * @param numVersions How many versions to return. Pass
    * {@link HConstants.ALL_VERSIONS} to retrieve all.
@@ -1351,10 +1350,9 @@ public class HStore implements HConstants {
       do {
         // if the row matches, we might want this one.
         if (rowMatches(origin, readkey)) {
-          // if the cell matches, then we definitely want this key.
+          // if the cell address matches, then we definitely want this key.
           if (cellMatches(origin, readkey)) {
-            // Store the key if it isn't deleted or superceeded by what's
-            // in the memcache
+            // Store key if isn't deleted or superceded by memcache
             if (!HLogEdit.isDeleted(readval.get())) {
               if (notExpiredAndNotInDeletes(this.ttl, readkey, now, deletes)) {
                 keys.add(new HStoreKey(readkey));
@@ -1363,7 +1361,6 @@ public class HStore implements HConstants {
                 break;
               }
             } else {
-              // Is this copy necessary?
               deletes.add(new HStoreKey(readkey));
             }
           } else {
@@ -1410,8 +1407,6 @@ public class HStore implements HConstants {
     // and columns that match those set on the scanner and which have delete
     // values. If memory usage becomes an issue, could redo as bloom filter.
     Set<HStoreKey> deletes = new HashSet<HStoreKey>();
     this.lock.readLock().lock();
     try {
       // First go to the memcache. Pick up deletes and candidates.
@@ -1425,7 +1420,8 @@ public class HStore implements HConstants {
         rowAtOrBeforeFromMapFile(maparray[i], row, candidateKeys, deletes);
       }
       // Return the best key from candidateKeys
-      byte [] result = candidateKeys.isEmpty()? null: candidateKeys.lastKey().getRow();
+      byte [] result =
+        candidateKeys.isEmpty()? null: candidateKeys.lastKey().getRow();
       return result;
     } finally {
       this.lock.readLock().unlock();
@@ -1550,18 +1546,15 @@ public class HStore implements HConstants {
       // as a candidate key
       if (Bytes.equals(readkey.getRow(), searchKey.getRow())) {
         if (!HLogEdit.isDeleted(readval.get())) {
-          if (notExpiredAndNotInDeletes(this.ttl, readkey, now, deletes)) {
-            candidateKeys.put(stripTimestamp(readkey),
-              new Long(readkey.getTimestamp()));
+          if (handleNonDelete(readkey, now, deletes, candidateKeys)) {
             foundCandidate = true;
             // NOTE! Continue.
             continue;
           }
         }
-        // Deleted value.
-        deletes.add(readkey);
+        HStoreKey copy = addCopyToDeletes(readkey, deletes);
         if (deletedOrExpiredRow == null) {
-          deletedOrExpiredRow = new HStoreKey(readkey);
+          deletedOrExpiredRow = copy;
         }
       } else if (Bytes.compareTo(readkey.getRow(), searchKey.getRow()) > 0) {
         // if the row key we just read is beyond the key we're searching for,
@@ -1572,16 +1565,15 @@ public class HStore implements HConstants {
         // we're seeking yet, so this row is a candidate for closest
         // (assuming that it isn't a delete).
         if (!HLogEdit.isDeleted(readval.get())) {
-          if (notExpiredAndNotInDeletes(this.ttl, readkey, now, deletes)) {
-            candidateKeys.put(stripTimestamp(readkey),
-              new Long(readkey.getTimestamp()));
+          if (handleNonDelete(readkey, now, deletes, candidateKeys)) {
             foundCandidate = true;
+            // NOTE: Continue
             continue;
           }
         }
-        deletes.add(readkey);
+        HStoreKey copy = addCopyToDeletes(readkey, deletes);
         if (deletedOrExpiredRow == null) {
-          deletedOrExpiredRow = new HStoreKey(readkey);
+          deletedOrExpiredRow = copy;
         }
       }
     } while(map.next(readkey, readval) && (knownNoGoodKey == null ||
@@ -1603,6 +1595,18 @@ public class HStore implements HConstants {
     // through here.
   }
+  /*
+   * @param key Key to copy and add to <code>deletes</code>
+   * @param deletes
+   * @return Instance of the copy added to <code>deletes</code>
+   */
+  private HStoreKey addCopyToDeletes(final HStoreKey key,
+      final Set<HStoreKey> deletes) {
+    HStoreKey copy = new HStoreKey(key);
+    deletes.add(copy);
+    return copy;
+  }
   private void rowAtOrBeforeWithCandidates(final HStoreKey startKey,
     final MapFile.Reader map, final byte[] row,
     final SortedMap<HStoreKey, Long> candidateKeys,
@@ -1631,56 +1635,79 @@ public class HStore implements HConstants {
     }
     do {
-      HStoreKey strippedKey = null;
       // if we have an exact match on row, and it's not a delete, save this
       // as a candidate key
       if (Bytes.equals(readkey.getRow(), row)) {
-        strippedKey = stripTimestamp(readkey);
-        if (!HLogEdit.isDeleted(readval.get())) {
-          if (notExpiredAndNotInDeletes(this.ttl, readkey, now, deletes)) {
-            candidateKeys.put(strippedKey,
-              new Long(readkey.getTimestamp()));
-          }
-        } else {
-          // If the candidate keys contain any that might match by timestamp,
-          // then check for a match and remove it if it's too young to
-          // survive the delete
-          if (candidateKeys.containsKey(strippedKey)) {
-            long bestCandidateTs =
-              candidateKeys.get(strippedKey).longValue();
-            if (bestCandidateTs <= readkey.getTimestamp()) {
-              candidateKeys.remove(strippedKey);
-            }
-          }
-        }
+        handleKey(readkey, readval.get(), now, deletes, candidateKeys);
       } else if (Bytes.compareTo(readkey.getRow(), row) > 0 ) {
         // if the row key we just read is beyond the key we're searching for,
         // then we're done.
         break;
       } else {
-        strippedKey = stripTimestamp(readkey);
         // So, the row key doesn't match, but we haven't gone past the row
         // we're seeking yet, so this row is a candidate for closest
         // (assuming that it isn't a delete).
-        if (!HLogEdit.isDeleted(readval.get())) {
-          if (notExpiredAndNotInDeletes(this.ttl, readkey, now, deletes)) {
-            candidateKeys.put(strippedKey, Long.valueOf(readkey.getTimestamp()));
-          }
-        } else {
-          // If the candidate keys contain any that might match by timestamp,
-          // then check for a match and remove it if it's too young to
-          // survive the delete
-          if (candidateKeys.containsKey(strippedKey)) {
-            long bestCandidateTs =
-              candidateKeys.get(strippedKey).longValue();
-            if (bestCandidateTs <= readkey.getTimestamp()) {
-              candidateKeys.remove(strippedKey);
-            }
-          }
-        }
+        handleKey(readkey, readval.get(), now, deletes, candidateKeys);
       }
     } while(map.next(readkey, readval));
   }
+  /*
+   * @param readkey
+   * @param now
+   * @param deletes
+   * @param candidateKeys
+   */
+  private void handleKey(final HStoreKey readkey, final byte [] value,
+      final long now, final Set<HStoreKey> deletes,
+      final SortedMap<HStoreKey, Long> candidateKeys) {
+    if (!HLogEdit.isDeleted(value)) {
+      handleNonDelete(readkey, now, deletes, candidateKeys);
+    } else {
+      // Pass copy because readkey will change next time next is called.
+      handleDeleted(new HStoreKey(readkey), candidateKeys, deletes);
+    }
+  }
+  /*
+   * @param readkey
+   * @param now
+   * @param deletes
+   * @param candidateKeys
+   * @return True if we added a candidate.
+   */
+  private boolean handleNonDelete(final HStoreKey readkey, final long now,
+      final Set<HStoreKey> deletes, final Map<HStoreKey, Long> candidateKeys) {
+    if (notExpiredAndNotInDeletes(this.ttl, readkey, now, deletes)) {
+      candidateKeys.put(stripTimestamp(readkey),
+        Long.valueOf(readkey.getTimestamp()));
+      return true;
+    }
+    return false;
+  }
+  /* Handle keys whose values hold deletes.
+   * Add to the set of deletes and then if the candidate keys contain any that
+   * might match by timestamp, then check for a match and remove it if it's too
+   * young to survive the delete
+   * @param k Be careful; if key was gotten from a Mapfile, pass in a copy.
+   * Values gotten by 'nexting' out of Mapfiles will change in each invocation.
+   * @param candidateKeys
+   * @param deletes
+   */
+  static void handleDeleted(final HStoreKey k,
+      final SortedMap<HStoreKey, Long> candidateKeys,
+      final Set<HStoreKey> deletes) {
+    deletes.add(k);
+    HStoreKey strippedKey = stripTimestamp(k);
+    if (candidateKeys.containsKey(strippedKey)) {
+      long bestCandidateTs =
+        candidateKeys.get(strippedKey).longValue();
+      if (bestCandidateTs <= k.getTimestamp()) {
+        candidateKeys.remove(strippedKey);
+      }
+    }
+  }
   /*
    * @param mf MapFile to dig in.
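A standalone sketch (toy string keys, not HStoreKey) of the candidate/delete bookkeeping the new handleNonDelete()/handleDeleted() helpers centralize above: a live cell becomes a candidate remembered by its timestamp, and a delete knocks out any candidate that is not strictly newer than the delete.

```java
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class CandidateDeleteSketch {
  public static void main(String[] args) {
    Map<String, Long> candidateKeys = new HashMap<String, Long>();
    Set<String> deletes = new HashSet<String>();

    handleNonDelete("row1/colA", 10L, deletes, candidateKeys);
    // A delete stamped 12 is at least as new as the candidate at 10, so it wins.
    handleDeleted("row1/colA", 12L, candidateKeys, deletes);
    System.out.println(candidateKeys.containsKey("row1/colA"));  // false

    handleNonDelete("row2/colA", 20L, deletes, candidateKeys);
    // A delete stamped 15 is older than the candidate at 20; the candidate stays.
    handleDeleted("row2/colA", 15L, candidateKeys, deletes);
    System.out.println(candidateKeys.containsKey("row2/colA"));  // true
  }

  /** Live cell: remember it as a candidate unless a delete already shadows it. */
  static boolean handleNonDelete(final String strippedKey, final long ts,
      final Set<String> deletes, final Map<String, Long> candidateKeys) {
    if (deletes.contains(strippedKey)) {
      return false;
    }
    candidateKeys.put(strippedKey, Long.valueOf(ts));
    return true;
  }

  /** Delete cell: record it and drop any candidate it is new enough to cover. */
  static void handleDeleted(final String strippedKey, final long deleteTs,
      final Map<String, Long> candidateKeys, final Set<String> deletes) {
    deletes.add(strippedKey);
    Long bestCandidateTs = candidateKeys.get(strippedKey);
    if (bestCandidateTs != null && bestCandidateTs.longValue() <= deleteTs) {
      candidateKeys.remove(strippedKey);
    }
  }
}
```

The real handleNonDelete() additionally folds in the TTL check via notExpiredAndNotInDeletes(); the sketch omits expiry to keep the delete interaction in focus.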
@@ -1697,8 +1724,8 @@ public class HStore implements HConstants {
     return new HStoreKey(key.getRow(), key.getColumn());
   }
-  /**
-   * Test that the <i>target</i> matches the <i>origin</i>. If the
+  /*
+   * Test that the <i>target</i> matches the <i>origin</i> cell address. If the
    * <i>origin</i> has an empty column, then it's assumed to mean any column
    * matches and only match on row and timestamp. Otherwise, it compares the
    * keys with HStoreKey.matchesRowCol().
@@ -1719,7 +1746,7 @@ public class HStore implements HConstants {
     return target.matchesRowCol(origin);
   }
-  /**
+  /*
   * Test that the <i>target</i> matches the <i>origin</i>. If the <i>origin</i>
   * has an empty column, then it just tests row equivalence. Otherwise, it uses
   * HStoreKey.matchesRowCol().

View File

@@ -395,14 +395,13 @@ class Memcache {
         found_key = key_iterator.next();
         if (Bytes.compareTo(found_key.getRow(), row) <= 0) {
           if (HLogEdit.isDeleted(tailMap.get(found_key))) {
-            handleDeleted(found_key, candidateKeys, deletes);
+            HStore.handleDeleted(found_key, candidateKeys, deletes);
             if (deletedOrExpiredRow == null) {
               deletedOrExpiredRow = found_key;
             }
           } else {
             if (HStore.notExpiredAndNotInDeletes(this.ttl, found_key, now, deletes)) {
-              HStoreKey strippedKey = stripTimestamp(found_key);
-              candidateKeys.put(strippedKey,
+              candidateKeys.put(stripTimestamp(found_key),
                 new Long(found_key.getTimestamp()));
             } else {
               if (deletedOrExpiredRow == null) {
@@ -493,7 +492,7 @@ class Memcache {
       do {
         HStoreKey found_key = key_iterator.next();
         if (HLogEdit.isDeleted(thisRowTailMap.get(found_key))) {
-          handleDeleted(found_key, candidateKeys, deletes);
+          HStore.handleDeleted(found_key, candidateKeys, deletes);
         } else {
           if (ttl == HConstants.FOREVER ||
               now < found_key.getTimestamp() + ttl ||
@@ -512,20 +511,6 @@ class Memcache {
     }
   }
-  private void handleDeleted(final HStoreKey k,
-      final SortedMap<HStoreKey, Long> candidateKeys,
-      final Set<HStoreKey> deletes) {
-    deletes.add(k);
-    HStoreKey strippedKey = stripTimestamp(k);
-    if (candidateKeys.containsKey(strippedKey)) {
-      long bestCandidateTs =
-        candidateKeys.get(strippedKey).longValue();
-      if (bestCandidateTs <= k.getTimestamp()) {
-        candidateKeys.remove(strippedKey);
-      }
-    }
-  }
   static HStoreKey stripTimestamp(HStoreKey key) {
     return new HStoreKey(key.getRow(), key.getColumn());
   }

View File

@@ -82,15 +82,9 @@ public class MetaUtils {
    * @throws IOException
    */
   private void initialize() throws IOException {
-    this.fs = FileSystem.get(this.conf);              // get DFS handle
+    this.fs = FileSystem.get(this.conf);
     // Get root directory of HBase installation
-    this.rootdir = fs.makeQualified(new Path(this.conf.get(HConstants.HBASE_DIR)));
-    if (!fs.exists(rootdir)) {
-      String message = "HBase root directory " + rootdir.toString() +
-        " does not exist.";
-      LOG.error(message);
-      throw new FileNotFoundException(message);
-    }
+    this.rootdir = FSUtils.getRootDir(this.conf);
   }
   /** @return the HLog
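The FSUtils.getRootDir() call replaces the inline lookup shown on the removed lines above. A sketch of roughly what such a helper does, reconstructed only from that removed code; whether the real helper also verifies that the directory exists is not shown here, and the actual HBase implementation may differ.

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class RootDirSketch {
  /** Mirrors the removed inline code; the real FSUtils.getRootDir may differ. */
  static Path getRootDir(final Configuration conf) throws IOException {
    // HConstants.HBASE_DIR names the "hbase.rootdir" configuration key.
    FileSystem fs = FileSystem.get(conf);
    return fs.makeQualified(new Path(conf.get("hbase.rootdir")));
  }
}
```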