HBASE-8123 Replace HashMap/HashSet with TreeMap/TreeSet where byte[] is used as key
git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1457089 13f79535-47bb-0310-9956-ffa450edef68
commit 16589af634
parent 167766c262
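
Why the change matters, as a minimal sketch (the class name and values below are illustrative, not part of the patch): byte[] does not override equals()/hashCode(), so a HashMap or HashSet keyed by byte[] compares keys by object identity and two arrays with identical contents land in separate entries. A TreeMap built with Bytes.BYTES_COMPARATOR compares key contents instead.

import java.util.HashMap;
import java.util.Map;
import java.util.TreeMap;

import org.apache.hadoop.hbase.util.Bytes;

public class ByteArrayKeyDemo {
  public static void main(String[] args) {
    byte[] k1 = Bytes.toBytes("row-1");
    byte[] k2 = Bytes.toBytes("row-1");   // same contents, different instance

    Map<byte[], String> hashed = new HashMap<byte[], String>();
    hashed.put(k1, "v1");
    hashed.put(k2, "v2");                                    // intended overwrite, but...
    System.out.println(hashed.size());                       // 2 -- keys matched by identity
    System.out.println(hashed.get(Bytes.toBytes("row-1")));  // null -- lookup misses

    Map<byte[], String> sorted = new TreeMap<byte[], String>(Bytes.BYTES_COMPARATOR);
    sorted.put(k1, "v1");
    sorted.put(k2, "v2");                                    // overwrites, as intended
    System.out.println(sorted.size());                       // 1
    System.out.println(sorted.get(Bytes.toBytes("row-1")));  // v2
  }
}
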
@@ -27,6 +27,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.exceptions.ZooKeeperConnectionException;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 
 import java.io.IOException;
@@ -38,6 +39,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentSkipListMap;
 import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;
@@ -89,7 +91,7 @@ public class HTableMultiplexer {
     this.serverToBufferQueueMap = new ConcurrentHashMap<HRegionLocation,
         LinkedBlockingQueue<PutStatus>>();
     this.serverToFlushWorkerMap = new ConcurrentHashMap<HRegionLocation, HTableFlushWorker>();
-    this.tableNameToHTableMap = new ConcurrentHashMap<byte[], HTable>();
+    this.tableNameToHTableMap = new ConcurrentSkipListMap<byte[], HTable>(Bytes.BYTES_COMPARATOR);
     this.retryNum = conf.getInt("hbase.client.retries.number", 10);
     this.perRegionServerBufferQueueSize = perRegionServerBufferQueueSize;
   }
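
HTableMultiplexer needs a thread-safe map, and there is no ConcurrentHashMap variant that accepts a custom key comparison, so the patch switches to ConcurrentSkipListMap, which takes a Comparator. A minimal sketch of that pattern, with a String value standing in for HTable:

import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ConcurrentSkipListMap;

import org.apache.hadoop.hbase.util.Bytes;

public class ConcurrentByteKeyDemo {
  public static void main(String[] args) {
    // ConcurrentSkipListMap accepts a Comparator, so byte[] keys are matched by content
    // while the map remains safe for concurrent readers and writers.
    ConcurrentMap<byte[], String> tableNameToHandle =
        new ConcurrentSkipListMap<byte[], String>(Bytes.BYTES_COMPARATOR);

    tableNameToHandle.putIfAbsent(Bytes.toBytes("usertable"), "handle-1");
    // A second byte[] with the same contents resolves to the same entry:
    System.out.println(tableNameToHandle.get(Bytes.toBytes("usertable"))); // handle-1
  }
}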

@@ -19,7 +19,7 @@
 package org.apache.hadoop.hbase.mapreduce;
 
 import java.io.IOException;
-import java.util.HashMap;
+import java.util.TreeMap;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
@@ -74,7 +74,7 @@ public class IndexBuilder {
   public static class Map extends
       Mapper<ImmutableBytesWritable, Result, ImmutableBytesWritable, Put> {
     private byte[] family;
-    private HashMap<byte[], ImmutableBytesWritable> indexes;
+    private TreeMap<byte[], ImmutableBytesWritable> indexes;
 
     @Override
     protected void map(ImmutableBytesWritable rowKey, Result result, Context context)
@@ -101,7 +101,7 @@ public class IndexBuilder {
       String[] fields = configuration.getStrings("index.fields");
      String familyName = configuration.get("index.familyname");
      family = Bytes.toBytes(familyName);
-      indexes = new HashMap<byte[], ImmutableBytesWritable>();
+      indexes = new TreeMap<byte[], ImmutableBytesWritable>(Bytes.BYTES_COMPARATOR);
      for(String field : fields) {
        // if the table is "people" and the field to index is "email", then the
        // index table will be called "people-email"

@@ -19,7 +19,7 @@
 package org.apache.hadoop.hbase.mapreduce;
 
 import java.io.IOException;
-import java.util.HashMap;
+import java.util.TreeMap;
 import java.util.Map;
 
 import org.apache.commons.logging.Log;
@@ -27,6 +27,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.mapreduce.Reducer;
 
 /**
@@ -54,7 +55,7 @@ public class PutCombiner<K> extends Reducer<K, Put, K, Put> {
     // flush could result in multiple Puts for a single rowkey. That is
     // acceptable because Combiner is run as an optimization and it's not
     // critical that all Puts are grouped perfectly.
-    Map<byte[], Put> puts = new HashMap<byte[], Put>();
+    Map<byte[], Put> puts = new TreeMap<byte[], Put>(Bytes.BYTES_COMPARATOR);
     for (Put p : vals) {
       cnt++;
       if (!puts.containsKey(p.getRow())) {
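
The combiner's grouping loop hinges on puts.containsKey(p.getRow()): each Put carries its own row byte[], so the check only ever succeeds when the map compares keys by content. A rough sketch of the grouping idea, with an Integer count standing in for the merged Put:

import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;

import org.apache.hadoop.hbase.util.Bytes;

public class CombinerGroupingSketch {
  public static void main(String[] args) {
    // Two values for the same row arrive as separate byte[] instances.
    List<byte[]> rows = Arrays.asList(Bytes.toBytes("row-7"), Bytes.toBytes("row-7"));

    Map<byte[], Integer> grouped = new TreeMap<byte[], Integer>(Bytes.BYTES_COMPARATOR);
    for (byte[] row : rows) {
      if (!grouped.containsKey(row)) {
        grouped.put(row, 1);                     // first value for this row
      } else {
        grouped.put(row, grouped.get(row) + 1);  // merged with the existing entry
      }
    }
    // With Bytes.BYTES_COMPARATOR the duplicate row is recognised; a HashMap
    // would have produced two independent entries here.
    System.out.println(grouped.size()); // 1
  }
}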

@@ -27,7 +27,7 @@ import java.net.URLEncoder;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
-import java.util.HashMap;
+import java.util.TreeMap;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
@@ -172,7 +172,8 @@ class FSHLog implements HLog, Syncable {
   * contains the regions that are currently flushing. That way we can store two numbers for
   * flushing and non-flushing (oldestUnflushedSeqNums) memstore for the same region.
   */
-  private final Map<byte[], Long> oldestFlushingSeqNums = new HashMap<byte[], Long>();
+  private final Map<byte[], Long> oldestFlushingSeqNums =
+    new TreeMap<byte[], Long>(Bytes.BYTES_COMPARATOR);
 
   private volatile boolean closed = false;
 

@@ -19,8 +19,6 @@ import java.net.InetAddress;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
@@ -485,7 +483,7 @@ public class AccessController extends BaseRegionObserver
   public void preCreateTable(ObserverContext<MasterCoprocessorEnvironment> c,
       HTableDescriptor desc, HRegionInfo[] regions) throws IOException {
     Set<byte[]> families = desc.getFamiliesKeys();
-    HashMap<byte[], Set<byte[]>> familyMap = Maps.newHashMapWithExpectedSize(families.size());
+    Map<byte[], Set<byte[]>> familyMap = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
     for (byte[] family: families) {
       familyMap.put(family, null);
     }
@@ -969,9 +967,9 @@ public class AccessController extends BaseRegionObserver
       final Increment increment)
       throws IOException {
     // Create a map of family to qualifiers.
-    Map<byte[], Set<byte[]>> familyMap = Maps.newHashMap();
+    Map<byte[], Set<byte[]>> familyMap = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
     for (Map.Entry<byte [], List<? extends Cell>> entry: increment.getFamilyMap().entrySet()) {
-      Set<byte []> qualifiers = new HashSet<byte []>(entry.getValue().size());
+      Set<byte[]> qualifiers = Sets.newTreeSet(Bytes.BYTES_COMPARATOR);
       for (Cell cell: entry.getValue()) {
        KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
        qualifiers.add(kv.getQualifier());
@@ -1257,10 +1255,12 @@ public class AccessController extends BaseRegionObserver
           Bytes.toString(tperm.getTable())));
     }
 
-    HashMap<byte[], Set<byte[]>> familyMap = Maps.newHashMapWithExpectedSize(1);
+    Map<byte[], Set<byte[]>> familyMap = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
     if (tperm.getFamily() != null) {
       if (tperm.getQualifier() != null) {
-        familyMap.put(tperm.getFamily(), Sets.newHashSet(tperm.getQualifier()));
+        Set<byte[]> qualifiers = Sets.newTreeSet(Bytes.BYTES_COMPARATOR);
+        qualifiers.add(tperm.getQualifier());
+        familyMap.put(tperm.getFamily(), qualifiers);
       } else {
         familyMap.put(tperm.getFamily(), null);
       }
@@ -1345,7 +1345,7 @@ public class AccessController extends BaseRegionObserver
       return null;
     }
 
-    Map<byte[], Collection<byte[]>> familyMap = Maps.newHashMapWithExpectedSize(1);
+    Map<byte[], Collection<byte[]>> familyMap = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
     familyMap.put(family, qualifier != null ? ImmutableSet.of(qualifier) : null);
     return familyMap;
   }
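
The AccessController hunks build the maps and sets through Guava's factory methods; Maps.newTreeMap and Sets.newTreeSet both accept a Comparator, so the same Bytes.BYTES_COMPARATOR pattern carries over. A small sketch of that usage (the family and qualifier values are made up; assumes Guava and the HBase Bytes utility on the classpath):

import java.util.Map;
import java.util.Set;

import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import org.apache.hadoop.hbase.util.Bytes;

public class GuavaTreeFactoriesSketch {
  public static void main(String[] args) {
    // Content-ordered map of column family -> set of qualifiers, both keyed by byte[].
    Map<byte[], Set<byte[]>> familyMap = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);

    Set<byte[]> qualifiers = Sets.newTreeSet(Bytes.BYTES_COMPARATOR);
    qualifiers.add(Bytes.toBytes("q1"));
    qualifiers.add(Bytes.toBytes("q1"));   // same contents, collapses to one element
    familyMap.put(Bytes.toBytes("cf"), qualifiers);

    System.out.println(qualifiers.size());                          // 1
    System.out.println(familyMap.containsKey(Bytes.toBytes("cf"))); // true
  }
}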

@@ -22,7 +22,7 @@ import static org.junit.Assert.*;
 
 import java.io.IOException;
 import java.lang.reflect.Method;
-import java.util.HashMap;
+import java.util.TreeMap;
 import java.util.List;
 import java.util.Map;
 
@@ -317,7 +317,7 @@ public class TestHLog {
    */
   @Test
   public void testFindMemstoresWithEditsEqualOrOlderThan() throws IOException {
-    Map<byte [], Long> regionsToSeqids = new HashMap<byte [], Long>();
+    Map<byte [], Long> regionsToSeqids = new TreeMap<byte [], Long>(Bytes.BYTES_COMPARATOR);
     for (int i = 0; i < 10; i++) {
       Long l = Long.valueOf(i);
       regionsToSeqids.put(l.toString().getBytes(), l);