HBASE-3532 HRegion#equals is broken

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1084038 13f79535-47bb-0310-9956-ffa450edef68
Michael Stack 2011-03-22 00:22:04 +00:00
parent c5eed8a344
commit d2ccdeb8dd
4 changed files with 19 additions and 46 deletions


@@ -43,6 +43,7 @@ Release 0.91.0 - Unreleased
              creating scanner
    HBASE-3641 LruBlockCache.CacheStats.getHitCount() is not using the
              correct variable
+   HBASE-3532 HRegion#equals is broken (Ted Yu via Stack)
    IMPROVEMENTS
    HBASE-3290 Max Compaction Size (Nicolas Spiegelberg via Stack)


@@ -2357,7 +2357,7 @@ public class HRegion implements HeapSize { // , Writable{
     if (!(o instanceof HRegion)) {
       return false;
     }
-    return this.hashCode() == ((HRegion)o).hashCode();
+    return Bytes.equals(this.regionInfo.getRegionName(), ((HRegion)o).regionInfo.getRegionName());
   }
   @Override

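The fix replaces hash-code comparison with a direct comparison of the identifying field: equal hash codes do not imply equal regions, since two distinct region names can collide on an int hash, so an equals() built on hashCode() breaks the Object contract. As a rough illustration only (plain JDK types, with RegionSketch and Arrays.equals standing in for HRegion and Bytes.equals, not HBase source), a name-keyed equals/hashCode pair that keeps the two methods consistent could look like this:

// Sketch only, not HBase code: equals and hashCode both derive from the
// identifying name bytes, so the equals/hashCode contract holds.
import java.util.Arrays;

class RegionSketch {
  private final byte[] regionName;

  RegionSketch(byte[] regionName) {
    this.regionName = regionName;
  }

  @Override
  public boolean equals(Object o) {
    if (!(o instanceof RegionSketch)) {
      return false;
    }
    // Content comparison, the role Bytes.equals plays in the actual patch.
    return Arrays.equals(this.regionName, ((RegionSketch) o).regionName);
  }

  @Override
  public int hashCode() {
    // Hash the same field that equals compares.
    return Arrays.hashCode(this.regionName);
  }
}

Keying both methods on the region name is what makes HRegion usable as a set element again, which the MemStoreFlusher change below takes advantage of.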

@@ -2410,31 +2410,6 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler,
     }
   }
-  /**
-   * @return Returns list of non-closed regions hosted on this server. If no
-   * regions to check, returns an empty list.
-   */
-  protected Set<HRegion> getRegionsToCheck() {
-    HashSet<HRegion> regionsToCheck = new HashSet<HRegion>();
-    // TODO: is this locking necessary?
-    lock.readLock().lock();
-    try {
-      synchronized (this.onlineRegions) {
-        regionsToCheck.addAll(this.onlineRegions.values());
-      }
-    } finally {
-      lock.readLock().unlock();
-    }
-    // Purge closed regions.
-    for (final Iterator<HRegion> i = regionsToCheck.iterator(); i.hasNext();) {
-      HRegion r = i.next();
-      if (r.isClosed()) {
-        i.remove();
-      }
-    }
-    return regionsToCheck;
-  }
   @Override
   @QosPriority(priority=HIGH_QOS)
   public long getProtocolVersion(final String protocol, final long clientVersion)


@@ -19,21 +19,9 @@
  */
 package org.apache.hadoop.hbase.regionserver;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.DroppedSnapshotException;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.RemoteExceptionHandler;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.util.StringUtils;
-import com.google.common.base.Preconditions;
 import java.io.IOException;
 import java.lang.management.ManagementFactory;
 import java.util.ConcurrentModificationException;
 import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.Set;
@@ -47,6 +35,17 @@ import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.locks.Condition;
 import java.util.concurrent.locks.ReentrantLock;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.DroppedSnapshotException;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.RemoteExceptionHandler;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.util.StringUtils;
+import com.google.common.base.Preconditions;
 /**
  * Thread that flushes cache on request
  *
@@ -152,19 +151,17 @@ class MemStoreFlusher extends Thread implements FlushRequester {
     SortedMap<Long, HRegion> regionsBySize =
         server.getCopyOfOnlineRegionsSortedBySize();
-    // TODO: HBASE-3532 - we can't use Set<HRegion> here because it doesn't
-    // implement equals correctly. So, set of region names.
-    Set<byte[]> excludedRegionNames = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
+    Set<HRegion> excludedRegions = new TreeSet<HRegion>();
     boolean flushedOne = false;
     while (!flushedOne) {
       // Find the biggest region that doesn't have too many storefiles
       // (might be null!)
       HRegion bestFlushableRegion = getBiggestMemstoreRegion(
-          regionsBySize, excludedRegionNames, true);
+          regionsBySize, excludedRegions, true);
       // Find the biggest region, total, even if it might have too many flushes.
       HRegion bestAnyRegion = getBiggestMemstoreRegion(
-          regionsBySize, excludedRegionNames, false);
+          regionsBySize, excludedRegions, false);
       if (bestAnyRegion == null) {
         LOG.error("Above memory mark but there are no flushable regions!");
@@ -201,7 +198,7 @@ class MemStoreFlusher extends Thread implements FlushRequester {
       if (!flushedOne) {
         LOG.info("Excluding unflushable region " + regionToFlush +
           " - trying to find a different region to flush.");
-        excludedRegionNames.add(regionToFlush.getRegionName());
+        excludedRegions.add(regionToFlush);
       }
     }
     return true;
@@ -272,11 +269,11 @@ class MemStoreFlusher extends Thread implements FlushRequester {
   private HRegion getBiggestMemstoreRegion(
       SortedMap<Long, HRegion> regionsBySize,
-      Set<byte[]> excludedRegionNames,
+      Set<HRegion> excludedRegions,
       boolean checkStoreFileCount) {
     synchronized (regionsInQueue) {
       for (HRegion region : regionsBySize.values()) {
-        if (excludedRegionNames.contains(region.getRegionName())) {
+        if (excludedRegions.contains(region)) {
           continue;
         }
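The MemStoreFlusher hunks above undo the workaround described in the removed TODO: while HRegion#equals was broken, the flusher tracked excluded regions by raw name, and because a byte[] inherits identity-based equals and hashCode from Object, it needed a TreeSet ordered by Bytes.BYTES_COMPARATOR for contains() to behave. The self-contained sketch below (plain JDK types, with a hand-written comparator standing in for Bytes.BYTES_COMPARATOR, not HBase code) shows the identity pitfall and the comparator workaround; with the name-based equals in place, the patch can keep the HRegion objects themselves in the exclusion set instead.

import java.util.Comparator;
import java.util.HashSet;
import java.util.Set;
import java.util.TreeSet;

// Sketch only, not HBase code: why a plain Set<byte[]> cannot answer
// contains() by content, and how a content comparator fixes the lookup.
public class ExclusionSetSketch {
  public static void main(String[] args) {
    byte[] first = "region-a".getBytes();
    byte[] second = "region-a".getBytes();   // same bytes, different array object

    Set<byte[]> identitySet = new HashSet<byte[]>();
    identitySet.add(first);
    // Prints false: arrays use identity equals/hashCode inherited from Object.
    System.out.println(identitySet.contains(second));

    // The pre-fix workaround: order the keys by content, the role that
    // Bytes.BYTES_COMPARATOR plays in the original MemStoreFlusher code.
    Set<byte[]> contentOrdered = new TreeSet<byte[]>(new Comparator<byte[]>() {
      public int compare(byte[] left, byte[] right) {
        int n = Math.min(left.length, right.length);
        for (int i = 0; i < n; i++) {
          int diff = (left[i] & 0xff) - (right[i] & 0xff);
          if (diff != 0) {
            return diff;
          }
        }
        return left.length - right.length;
      }
    });
    contentOrdered.add(first);
    // Prints true: the comparator inspects contents, not identity.
    System.out.println(contentOrdered.contains(second));
  }
}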