HBASE-4220 Lots of DNS queries from client
git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1160981 13f79535-47bb-0310-9956-ffa450edef68
commit 48bdcd353b
parent cca758c679
@@ -212,6 +212,7 @@ Release 0.91.0 - Unreleased
    HBASE-4239  HBASE-4012 introduced duplicate variable Bytes.LONG_BYTES
    HBASE-4225  NoSuchColumnFamilyException in multi doesn't say which family
                is bad (Ramkrishna Vasudevan)
+   HBASE-4220  Lots of DNS queries from client

   IMPROVEMENTS
    HBASE-3290  Max Compaction Size (Nicolas Spiegelberg via Stack)
@@ -94,10 +94,12 @@ public class HRegionLocation implements Comparable<HRegionLocation> {
     return regionInfo;
   }

-  /** @return HServerAddress
+  /**
+   * Do not use!!! Creates a HServerAddress instance which will do a resolve.
+   * @return HServerAddress
+   * @deprecated Use {@link #getHostnamePort}
    */
-  public HServerAddress getServerAddress(){
+  public HServerAddress getServerAddress() {
     return new HServerAddress(this.hostname, this.port);
   }

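The deprecation note above captures the point of the whole change: every getServerAddress() call now builds a fresh HServerAddress, and that construction "will do a resolve", which is the kind of lookup the JIRA title complains about. A minimal JDK-only sketch of the difference, not part of the patch (the class name is made up; "localhost" is used so it resolves anywhere):

import java.net.InetSocketAddress;

// Illustrative sketch only; not an HBase class.
public class ResolveCostSketch {
  public static void main(String[] args) {
    String hostname = "localhost";  // stands in for a region server hostname
    int port = 60020;

    // Constructing with a hostname triggers a resolve right away,
    // the same cost getServerAddress() pays on every call.
    InetSocketAddress eager = new InetSocketAddress(hostname, port);
    System.out.println("eager resolved: " + !eager.isUnresolved());  // true

    // Carrying just hostname and port, as getHostnamePort() does, defers any lookup.
    InetSocketAddress lazy = InetSocketAddress.createUnresolved(hostname, port);
    System.out.println("lazy resolved: " + !lazy.isUnresolved());    // false
  }
}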
@@ -35,7 +35,6 @@ import java.util.NoSuchElementException;
 import java.util.Map.Entry;
 import java.util.Set;
 import java.util.TreeMap;
 import java.util.TreeSet;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.CopyOnWriteArraySet;
@@ -1122,7 +1121,7 @@ public class HConnectionManager {
       if (tableLocations.put(startKey, location) == null) {
         LOG.debug("Cached location for " +
           location.getRegionInfo().getRegionNameAsString() +
-          " is " + location.getServerAddress());
+          " is " + location.getHostnamePort());
       }
     }

@@ -20,18 +20,15 @@

 package org.apache.hadoop.hbase.client;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import java.io.IOException;
-import java.net.SocketTimeoutException;
-import java.util.concurrent.Callable;

 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.ipc.HBaseRPC;
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.io.retry.RetryPolicy;

+import java.io.IOException;
+import java.net.SocketTimeoutException;
+import java.util.concurrent.Callable;

 /**
  * Abstract class that implements Callable, used by retryable actions.
@@ -68,7 +65,8 @@ public abstract class ServerCallable<T> implements Callable<T> {
    */
   public void instantiateServer(boolean reload) throws IOException {
     this.location = connection.getRegionLocation(tableName, row, reload);
-    this.server = connection.getHRegionConnection(location.getServerAddress());
+    this.server = connection.getHRegionConnection(location.getHostname(),
+      location.getPort());
   }

   /** @return the server name */
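Here the callable asks the connection layer for a server by hostname and port instead of by a resolved HServerAddress. A rough, self-contained sketch of keying a connection cache on the plain hostname:port pair; the ConnectionStub type and cache are invented for illustration, this is not the HConnectionManager implementation:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Hypothetical stand-in for an RPC proxy; not an HBase class.
final class ConnectionStub {
  final String hostname;
  final int port;
  ConnectionStub(String hostname, int port) {
    this.hostname = hostname;
    this.port = port;
  }
}

public class HostPortCacheSketch {
  // Keyed by "hostname:port", so finding or reusing a connection needs no DNS lookup.
  private final Map<String, ConnectionStub> cache = new ConcurrentHashMap<>();

  public ConnectionStub getConnection(String hostname, int port) {
    return cache.computeIfAbsent(hostname + ":" + port,
        key -> new ConnectionStub(hostname, port));
  }
}

Whether the real connection cache works exactly this way is beside the point; the visible change in the diff is simply that hostname and port travel as plain values.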
@@ -76,7 +74,7 @@ public abstract class ServerCallable<T> implements Callable<T> {
     if (location == null) {
       return null;
     }
-    return location.getServerAddress().toString();
+    return location.getHostnamePort();
   }

   /** @return the region name */
@@ -135,7 +135,7 @@ implements InputFormat<ImmutableBytesWritable, Result> {
       int lastPos = startPos + middle;
       lastPos = startKeys.length % realNumSplits > i ? lastPos + 1 : lastPos;
       String regionLocation = table.getRegionLocation(startKeys[startPos]).
-        getServerAddress().getHostname();
+        getHostname();
       splits[i] = new TableSplit(this.table.getTableName(),
         startKeys[startPos], ((i + 1) < realNumSplits) ? startKeys[lastPos]:
           HConstants.EMPTY_START_ROW, regionLocation);
@@ -141,7 +141,7 @@ extends InputFormat<ImmutableBytesWritable, Result> {
         continue;
       }
       String regionLocation = table.getRegionLocation(keys.getFirst()[i]).
-        getServerAddress().getHostname();
+        getHostname();
       byte[] startRow = scan.getStartRow();
       byte[] stopRow = scan.getStopRow();
       // determine if the given start an stop key fall into the region
@@ -49,8 +49,8 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
-import org.apache.hadoop.hbase.HServerAddress;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.NoServerForRegionException;
@@ -373,11 +373,11 @@ public class RegionSplitter {
     // requests to the same RS can stall the outstanding split queue.
     // To fix, group the regions into an RS pool and round-robin through it
     LOG.debug("Bucketing regions by regionserver...");
-    TreeMap<HServerAddress, LinkedList<Pair<byte[], byte[]>>> daughterRegions = Maps
-        .newTreeMap();
+    TreeMap<String, LinkedList<Pair<byte[], byte[]>>> daughterRegions =
+      Maps.newTreeMap();
     for (Pair<byte[], byte[]> dr : tmpRegionSet) {
-      HServerAddress rsLocation = table.getRegionLocation(dr.getSecond())
-          .getServerAddress();
+      String rsLocation = table.getRegionLocation(dr.getSecond()).
+        getHostnamePort();
       if (!daughterRegions.containsKey(rsLocation)) {
         LinkedList<Pair<byte[], byte[]>> entry = Lists.newLinkedList();
         daughterRegions.put(rsLocation, entry);
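daughterRegions is now keyed by the hostname:port string rather than by an HServerAddress. A self-contained sketch of that bucketing pattern with JDK collections, using invented region and server names:

import java.util.LinkedList;
import java.util.List;
import java.util.TreeMap;

// Illustrative sketch only; not from the patch.
public class BucketByServerSketch {
  public static void main(String[] args) {
    // region name -> hosting server, standing in for table.getRegionLocation(...).getHostnamePort()
    String[][] regionToServer = {
        {"region-a", "rs1.example.org:60020"},
        {"region-b", "rs2.example.org:60020"},
        {"region-c", "rs1.example.org:60020"},
    };

    // Bucket work items by the plain "hostname:port" key, as daughterRegions does above.
    TreeMap<String, List<String>> byServer = new TreeMap<>();
    for (String[] pair : regionToServer) {
      byServer.computeIfAbsent(pair[1], k -> new LinkedList<>()).add(pair[0]);
    }
    System.out.println(byServer);  // {rs1...=[region-a, region-c], rs2...=[region-b]}
  }
}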
@@ -401,9 +401,9 @@ public class RegionSplitter {
     LOG.debug(daughterRegions.size() + " RS have regions to splt.");

     // Get RegionServer : region count mapping
-    final TreeMap<HServerAddress, Integer> rsSizes = Maps.newTreeMap();
-    Map<HRegionInfo, HServerAddress> regionsInfo = table.getRegionsInfo();
-    for (HServerAddress rs : regionsInfo.values()) {
+    final TreeMap<ServerName, Integer> rsSizes = Maps.newTreeMap();
+    Map<HRegionInfo, ServerName> regionsInfo = table.getRegionLocations();
+    for (ServerName rs : regionsInfo.values()) {
       if (rsSizes.containsKey(rs)) {
         rsSizes.put(rs, rsSizes.get(rs) + 1);
       } else {
@@ -412,17 +412,16 @@ public class RegionSplitter {
       }

       // sort the RS by the number of regions they have
-      List<HServerAddress> serversLeft = Lists.newArrayList(daughterRegions
-          .keySet());
-      Collections.sort(serversLeft, new Comparator<HServerAddress>() {
-        public int compare(HServerAddress o1, HServerAddress o2) {
+      List<String> serversLeft = Lists.newArrayList(daughterRegions .keySet());
+      Collections.sort(serversLeft, new Comparator<String>() {
+        public int compare(String o1, String o2) {
           return rsSizes.get(o1).compareTo(rsSizes.get(o2));
         }
       });

       // round-robin through the RS list. Choose the lightest-loaded servers
       // first to keep the master from load-balancing regions as we split.
-      for (HServerAddress rsLoc : serversLeft) {
+      for (String rsLoc : serversLeft) {
         Pair<byte[], byte[]> dr = null;

         // find a region in the RS list that hasn't been moved
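The comparator change above sorts plain String server keys by how many regions each one hosts, lightest first, so splits spread across the cluster. A compact JDK-only sketch of the same ordering, with invented servers and counts:

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;

// Illustrative sketch only; not from the patch.
public class SortByLoadSketch {
  public static void main(String[] args) {
    // "hostname:port" -> region count, standing in for rsSizes
    Map<String, Integer> rsSizes = new TreeMap<>();
    rsSizes.put("rs1.example.org:60020", 12);
    rsSizes.put("rs2.example.org:60020", 3);
    rsSizes.put("rs3.example.org:60020", 7);

    // Order the server keys by load, lightest-loaded first, mirroring the comparator above.
    List<String> serversLeft = new ArrayList<>(rsSizes.keySet());
    serversLeft.sort((o1, o2) -> rsSizes.get(o1).compareTo(rsSizes.get(o2)));

    System.out.println(serversLeft);  // [rs2..., rs3..., rs1...]
  }
}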
@@ -437,7 +436,7 @@ public class RegionSplitter {
         HRegionLocation regionLoc = table.getRegionLocation(split);

         // if this region moved locations
-        HServerAddress newRs = regionLoc.getServerAddress();
+        String newRs = regionLoc.getHostnamePort();
         if (newRs.compareTo(rsLoc) != 0) {
           LOG.debug("Region with " + splitAlgo.rowToStr(split)
             + " moved to " + newRs + ". Relocating...");