HBASE-7948 client doesn't need to refresh meta while the region is opening
git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1459130 13f79535-47bb-0310-9956-ffa450edef68
parent 1fab626bc7
commit 66d90f865e
@@ -84,6 +84,9 @@ log4j.logger.org.apache.hadoop.hbase.zookeeper.ZKUtil=INFO
 log4j.logger.org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher=INFO
 #log4j.logger.org.apache.hadoop.dfs=DEBUG
 # Set this class to log INFO only otherwise its OTT
+# Enable this to get detailed connection error/retry logging.
+# log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=TRACE
+

 # Uncomment this line to enable tracing on _every_ RPC call (this can be a lot of output)
 #log4j.logger.org.apache.hadoop.ipc.HBaseServer.trace=DEBUG

@@ -40,6 +40,10 @@ public class HRegionLocation implements Comparable<HRegionLocation> {
   // Cache of the hostname + port
   private String cachedHostnamePort;

+  public HRegionLocation(HRegionInfo regionInfo, ServerName serverName) {
+    this(regionInfo, serverName, HConstants.NO_SEQNUM);
+  }
+
   public HRegionLocation(HRegionInfo regionInfo, ServerName serverName, long seqNum) {
     this.regionInfo = regionInfo;
     this.serverName = serverName;
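Note (illustration only, not part of this commit): a minimal sketch of how the new two-argument HRegionLocation constructor is expected to be used. It delegates to the three-argument form with HConstants.NO_SEQNUM, so the two calls below should build equivalent locations. The class name, host, and port here are made up for the example.

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.ServerName;

public class HRegionLocationCtorSketch {
  public static void main(String[] args) {
    // Hypothetical server name; any host/port/startcode works for the illustration.
    ServerName sn = new ServerName("rs1.example.org", 60020, 0L);
    // Two-argument form: no open sequence number known yet.
    HRegionLocation a = new HRegionLocation(HRegionInfo.FIRST_META_REGIONINFO, sn);
    // Equivalent explicit form used elsewhere in this patch.
    HRegionLocation b =
        new HRegionLocation(HRegionInfo.FIRST_META_REGIONINFO, sn, HConstants.NO_SEQNUM);
    System.out.println(a.equals(b)); // expected: true, same region and server
  }
}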
@@ -72,6 +72,7 @@ import org.apache.hadoop.hbase.client.coprocessor.Batch;
 import org.apache.hadoop.hbase.exceptions.DoNotRetryIOException;
 import org.apache.hadoop.hbase.exceptions.MasterNotRunningException;
 import org.apache.hadoop.hbase.exceptions.RegionMovedException;
+import org.apache.hadoop.hbase.exceptions.RegionOpeningException;
 import org.apache.hadoop.hbase.exceptions.RegionServerStoppedException;
 import org.apache.hadoop.hbase.exceptions.TableNotFoundException;
 import org.apache.hadoop.hbase.exceptions.ZooKeeperConnectionException;
@@ -86,6 +87,7 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDe
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.Addressing;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.SoftValueSortedMap;
 import org.apache.hadoop.hbase.util.Triple;
@@ -1065,8 +1067,8 @@ public class HConnectionManager {
             return true; // don't cache it
           }
           // instantiate the location
-          HRegionLocation loc = new HRegionLocation(regionInfo, serverName,
-            HRegionInfo.getSeqNumDuringOpen(result));
+          long seqNum = HRegionInfo.getSeqNumDuringOpen(result);
+          HRegionLocation loc = new HRegionLocation(regionInfo, serverName, seqNum);
           // cache this meta entry
           cacheLocation(tableName, null, loc);
           return true;
@@ -1196,7 +1198,7 @@ public class HConnectionManager {

           // Instantiate the location
           location = new HRegionLocation(regionInfo, serverName,
             HRegionInfo.getSeqNumDuringOpen(regionInfoRow));
           cacheLocation(tableName, null, location);
           return location;
         } catch (TableNotFoundException e) {
@@ -1292,8 +1294,7 @@ public class HConnectionManager {
     void forceDeleteCachedLocation(final byte [] tableName, final byte [] row) {
       HRegionLocation rl = null;
       synchronized (this.cachedRegionLocations) {
-        Map<byte[], HRegionLocation> tableLocations =
-          getTableLocations(tableName);
+        Map<byte[], HRegionLocation> tableLocations = getTableLocations(tableName);
         // start to examine the cache. we can only do cache actions
         // if there's something in the cache for this table.
         if (!tableLocations.isEmpty()) {
@@ -1853,7 +1854,7 @@ public class HConnectionManager {
      */
     void deleteCachedLocation(HRegionInfo hri, HRegionLocation source) {
       boolean isStaleDelete = false;
-      HRegionLocation oldLocation;
+      HRegionLocation oldLocation = null;
       synchronized (this.cachedRegionLocations) {
         Map<byte[], HRegionLocation> tableLocations =
           getTableLocations(hri.getTableName());
@@ -1902,6 +1903,9 @@ public class HConnectionManager {
             rme.getHostname() + ":" + rme.getPort() + " according to " + source.getHostnamePort());
           updateCachedLocation(
             regionInfo, source, rme.getServerName(), rme.getLocationSeqNum());
+        } else if (RegionOpeningException.find(exception) != null) {
+          LOG.info("Region " + regionInfo.getRegionNameAsString() + " is being opened on "
+            + source.getHostnamePort() + "; not deleting the cache entry");
         } else {
           deleteCachedLocation(regionInfo, source);
         }
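Note (illustration only, not part of this commit): the hunk above changes how a failed call updates the client's location cache. The sketch below restates the decision order with the HBase internals hidden behind a hypothetical LocationCache interface; RegionMovedException.find and its getHostname/getPort/getLocationSeqNum accessors are assumed to have the same shape as the new RegionOpeningException.find.

import org.apache.hadoop.hbase.exceptions.RegionMovedException;
import org.apache.hadoop.hbase.exceptions.RegionOpeningException;

/** Hypothetical sketch of the cache-update policy implemented in the hunk above. */
public class CacheUpdateSketch {
  interface LocationCache {
    void moveTo(String region, String newServer, long seqNum); // region moved: rewrite entry
    void invalidate(String region);                            // unknown failure: drop entry
  }

  static void onCallFailure(LocationCache cache, String region, Throwable exception) {
    RegionMovedException rme = RegionMovedException.find(exception);
    if (rme != null) {
      // The server told us where the region went: update the cache entry in place.
      cache.moveTo(region, rme.getHostname() + ":" + rme.getPort(), rme.getLocationSeqNum());
    } else if (RegionOpeningException.find(exception) != null) {
      // The region is still opening on the cached server: keep the entry and retry later,
      // which is the point of HBASE-7948 (no needless meta refresh).
    } else {
      // Anything else: the cached location is suspect, so drop it and go back to meta.
      cache.invalidate(region);
    }
  }
}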
@@ -2163,13 +2167,11 @@ public class HConnectionManager {
         // Retry all actions in toReplay then clear it.
         if (!noRetry && !toReplay.isEmpty()) {
           if (isTraceEnabled) {
-            LOG.trace("Retrying due to errors: " + retriedErrors.getDescriptionAndClear());
+            LOG.trace("Retrying due to errors" + (lastRetry ? " (one last time)" : "")
+              + ": " + retriedErrors.getDescriptionAndClear());
           }
           doRetry();
           if (lastRetry) {
-            if (isTraceEnabled) {
-              LOG.trace("No more retries");
-            }
             noRetry = true;
           }
         }
@@ -2201,7 +2203,7 @@ public class HConnectionManager {
         if (exceptions.isEmpty()) {
           return "";
         }
-        String result = makeException().getMessage();
+        String result = makeException().getExhaustiveDescription();
         exceptions.clear();
         actions.clear();
         addresses.clear();
@@ -2293,9 +2295,7 @@ public class HConnectionManager {
     int getNumberOfCachedRegionLocations(final byte[] tableName) {
       Integer key = Bytes.mapKey(tableName);
       synchronized (this.cachedRegionLocations) {
-        Map<byte[], HRegionLocation> tableLocs =
-          this.cachedRegionLocations.get(key);
-
+        Map<byte[], HRegionLocation> tableLocs = this.cachedRegionLocations.get(key);
         if (tableLocs == null) {
           return 0;
         }
@@ -121,18 +121,22 @@ extends RetriesExhaustedException {

   public String getExhaustiveDescription() {
     StringWriter errorWriter = new StringWriter();
+    PrintWriter pw = new PrintWriter(errorWriter);
     for (int i = 0; i < this.exceptions.size(); ++i) {
       Throwable t = this.exceptions.get(i);
       Row action = this.actions.get(i);
       String server = this.hostnameAndPort.get(i);
-      errorWriter.append("Error #" + i + " from [" + server + "] for ["
+      pw.append("Error");
+      if (this.exceptions.size() > 1) {
+        pw.append(" #" + i);
+      }
+      pw.append(" from [" + server + "] for ["
         + ((action == null) ? "unknown key" : Bytes.toStringBinary(action.getRow())) + "]");
       if (t != null) {
-        PrintWriter pw = new PrintWriter(errorWriter);
         t.printStackTrace(pw);
-        pw.flush();
       }
     }
+    pw.flush();
     return errorWriter.toString();
   }

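Note (illustration only, not part of this commit): the rewrite above shares one PrintWriter across the whole loop and flushes it once at the end, instead of creating and flushing a writer per stack trace. A PrintWriter backed by a StringWriter only buffers in memory, so a single flush before StringWriter.toString() is enough. A minimal standalone demonstration of the pattern, with made-up error data:

import java.io.PrintWriter;
import java.io.StringWriter;
import java.util.Arrays;
import java.util.List;

public class ExhaustiveDescriptionSketch {
  public static void main(String[] args) {
    List<Throwable> errors = Arrays.asList(
        new IllegalStateException("first failure"),
        new RuntimeException("second failure"));

    StringWriter errorWriter = new StringWriter();
    PrintWriter pw = new PrintWriter(errorWriter);  // one writer for all entries
    for (int i = 0; i < errors.size(); ++i) {
      pw.append("Error");
      if (errors.size() > 1) {
        pw.append(" #" + i);       // only number the entries when there is more than one
      }
      pw.append(" from [server-" + i + "]");
      errors.get(i).printStackTrace(pw);
    }
    pw.flush();                    // single flush before reading the buffer
    System.out.println(errorWriter.toString());
  }
}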
@@ -0,0 +1,78 @@
+/*
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.exceptions;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.ipc.RemoteException;
+
+/**
+ * Subclass if the server knows the region is now on another server.
+ * This allows the client to call the new region server without calling the master.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class RegionOpeningException extends NotServingRegionException {
+  private static final Log LOG = LogFactory.getLog(RegionOpeningException.class);
+  private static final long serialVersionUID = -7232903522310558395L;
+
+  public RegionOpeningException(String message) {
+    super(message);
+  }
+
+  /**
+   * Look for a RegionOpeningException in the exception:
+   *  - hadoop.ipc wrapped exceptions
+   *  - nested exceptions
+   * Returns null if we didn't find the exception.
+   * TODO: this code is mostly C/Ped from RegionMovedExecption. Due to the limitations of
+   *   generics it's not amenable to generalizing without adding parameters/isAssignableFrom.
+   *   Might make general if used in more places.
+   */
+  public static RegionOpeningException find(Object exception) {
+    if (exception == null || !(exception instanceof Throwable)) {
+      return null;
+    }
+    RegionOpeningException res = null;
+    Throwable cur = (Throwable)exception;
+    while (res == null && cur != null) {
+      if (cur instanceof RegionOpeningException) {
+        res = (RegionOpeningException) cur;
+      } else {
+        if (cur instanceof RemoteException) {
+          RemoteException re = (RemoteException) cur;
+          Exception e = re.unwrapRemoteException(RegionOpeningException.class);
+          if (e == null) {
+            e = re.unwrapRemoteException();
+          }
+          // unwrapRemoteException can return the exception given as a parameter when it cannot
+          // unwrap it. In this case, there is no need to look further
+          // noinspection ObjectEquality
+          if (e != re) {
+            res = find(e);
+          }
+        }
+        cur = cur.getCause();
+      }
+    }
+    return res;
+  }
+}
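Note (illustration only, not part of this commit): a sketch of the cases find() is meant to handle on the client side, namely a RegionOpeningException delivered directly, wrapped in a hadoop.ipc RemoteException, or nested as a cause. Using the RemoteException(className, message) constructor to simulate the wrapped form is an assumption for this example.

import org.apache.hadoop.hbase.exceptions.RegionOpeningException;
import org.apache.hadoop.ipc.RemoteException;

public class RegionOpeningExceptionFindSketch {
  public static void main(String[] args) {
    // Direct instance: trivially found.
    Throwable direct = new RegionOpeningException("Region is being opened: abc123");

    // Wrapped the way hadoop.ipc delivers server-side exceptions to the client.
    Throwable wrapped = new RemoteException(
        RegionOpeningException.class.getName(), "Region is being opened: abc123");

    // Nested as the cause of some other failure.
    Throwable nested = new RuntimeException("rpc failed", direct);

    System.out.println(RegionOpeningException.find(direct) != null);  // expected: true
    System.out.println(RegionOpeningException.find(wrapped) != null); // expected: true
    System.out.println(RegionOpeningException.find(nested) != null);  // expected: true
    System.out.println(RegionOpeningException.find(new Object()));    // expected: null
  }
}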
@@ -74,6 +74,7 @@ import org.apache.hadoop.hbase.exceptions.NotServingRegionException;
 import org.apache.hadoop.hbase.exceptions.OutOfOrderScannerNextException;
 import org.apache.hadoop.hbase.exceptions.RegionAlreadyInTransitionException;
 import org.apache.hadoop.hbase.exceptions.RegionMovedException;
+import org.apache.hadoop.hbase.exceptions.RegionOpeningException;
 import org.apache.hadoop.hbase.RegionServerStatusProtocol;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.ServerName;
@@ -2409,9 +2410,12 @@ public class HRegionServer implements ClientProtocol,
       MovedRegionInfo moveInfo = getMovedRegion(encodedRegionName);
       if (moveInfo != null) {
         throw new RegionMovedException(moveInfo.getServerName(), moveInfo.getSeqNum());
-      } else {
-        throw new NotServingRegionException("Region is not online: " + encodedRegionName);
       }
+      Boolean isOpening = this.regionsInTransitionInRS.get(Bytes.toBytes(encodedRegionName));
+      if (isOpening != null && isOpening.booleanValue()) {
+        throw new RegionOpeningException("Region is being opened: " + encodedRegionName);
+      }
+      throw new NotServingRegionException("Region is not online: " + encodedRegionName);
     }
     return region;
   }
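Note (illustration only, not part of this commit): the server-side change above reorders what happens when a region lookup misses. The sketch below restates the resulting decision order with the region server's internal bookkeeping replaced by hypothetical maps; the exception constructors match those used in the hunk.

import java.io.IOException;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.exceptions.NotServingRegionException;
import org.apache.hadoop.hbase.exceptions.RegionMovedException;
import org.apache.hadoop.hbase.exceptions.RegionOpeningException;

/** Hypothetical sketch of the lookup-miss decision order introduced above. */
public class RegionLookupSketch {
  // Stand-ins for the region server's internal state.
  private final ConcurrentMap<String, ServerName> movedRegions =
      new ConcurrentHashMap<String, ServerName>();
  private final ConcurrentMap<String, Boolean> regionsInTransition =
      new ConcurrentHashMap<String, Boolean>();

  void onRegionNotFound(String encodedRegionName) throws IOException {
    ServerName newServer = movedRegions.get(encodedRegionName);
    if (newServer != null) {
      // 1. We know where the region went: tell the client so it can retarget
      //    without going back to meta (seqnum is made up for the sketch).
      throw new RegionMovedException(newServer, 1L);
    }
    Boolean isOpening = regionsInTransition.get(encodedRegionName);
    if (isOpening != null && isOpening.booleanValue()) {
      // 2. The region is opening right here: tell the client to keep its cache entry and retry.
      throw new RegionOpeningException("Region is being opened: " + encodedRegionName);
    }
    // 3. Otherwise the region really is not online on this server.
    throw new NotServingRegionException("Region is not online: " + encodedRegionName);
  }
}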
@@ -36,21 +36,17 @@ public class TestHRegionLocation {
   @Test
   public void testHashAndEqualsCode() {
     ServerName hsa1 = new ServerName("localhost", 1234, -1L);
-    HRegionLocation hrl1 = new HRegionLocation(HRegionInfo.FIRST_META_REGIONINFO,
-      hsa1, HConstants.NO_SEQNUM);
-    HRegionLocation hrl2 = new HRegionLocation(HRegionInfo.FIRST_META_REGIONINFO,
-      hsa1, HConstants.NO_SEQNUM);
+    HRegionLocation hrl1 = new HRegionLocation(HRegionInfo.FIRST_META_REGIONINFO, hsa1);
+    HRegionLocation hrl2 = new HRegionLocation(HRegionInfo.FIRST_META_REGIONINFO, hsa1);
     assertEquals(hrl1.hashCode(), hrl2.hashCode());
     assertTrue(hrl1.equals(hrl2));
-    HRegionLocation hrl3 = new HRegionLocation(HRegionInfo.ROOT_REGIONINFO,
-      hsa1, HConstants.NO_SEQNUM);
+    HRegionLocation hrl3 = new HRegionLocation(HRegionInfo.ROOT_REGIONINFO, hsa1);
     assertNotSame(hrl1, hrl3);
     // They are equal because they have same location even though they are
     // carrying different regions or timestamp.
     assertTrue(hrl1.equals(hrl3));
     ServerName hsa2 = new ServerName("localhost", 12345, -1L);
-    HRegionLocation hrl4 = new HRegionLocation(HRegionInfo.ROOT_REGIONINFO,
-      hsa2, HConstants.NO_SEQNUM);
+    HRegionLocation hrl4 = new HRegionLocation(HRegionInfo.ROOT_REGIONINFO, hsa2);
     // These have same HRI but different locations so should be different.
     assertFalse(hrl3.equals(hrl4));
     HRegionLocation hrl5 = new HRegionLocation(hrl4.getRegionInfo(),
@@ -61,8 +57,7 @@ public class TestHRegionLocation {
   @Test
   public void testToString() {
     ServerName hsa1 = new ServerName("localhost", 1234, -1L);
-    HRegionLocation hrl1 = new HRegionLocation(HRegionInfo.FIRST_META_REGIONINFO,
-      hsa1, HConstants.NO_SEQNUM);
+    HRegionLocation hrl1 = new HRegionLocation(HRegionInfo.FIRST_META_REGIONINFO, hsa1);
     System.out.println(hrl1.toString());
   }

@@ -70,10 +65,10 @@ public class TestHRegionLocation {
   public void testCompareTo() {
     ServerName hsa1 = new ServerName("localhost", 1234, -1L);
     HRegionLocation hsl1 =
-      new HRegionLocation(HRegionInfo.ROOT_REGIONINFO, hsa1, HConstants.NO_SEQNUM);
+      new HRegionLocation(HRegionInfo.ROOT_REGIONINFO, hsa1);
     ServerName hsa2 = new ServerName("localhost", 1235, -1L);
     HRegionLocation hsl2 =
-      new HRegionLocation(HRegionInfo.ROOT_REGIONINFO, hsa2, HConstants.NO_SEQNUM);
+      new HRegionLocation(HRegionInfo.ROOT_REGIONINFO, hsa2);
     assertTrue(hsl1.compareTo(hsl1) == 0);
     assertTrue(hsl2.compareTo(hsl2) == 0);
     int compare1 = hsl1.compareTo(hsl2);
@@ -330,7 +330,7 @@ public class TestCatalogTracker {
     Mockito.doNothing().when(connection).close();
     // Make it so we return any old location when asked.
     final HRegionLocation anyLocation =
-      new HRegionLocation(HRegionInfo.FIRST_META_REGIONINFO, SN, HConstants.NO_SEQNUM);
+      new HRegionLocation(HRegionInfo.FIRST_META_REGIONINFO, SN);
     Mockito.when(connection.getRegionLocation((byte[]) Mockito.any(),
         (byte[]) Mockito.any(), Mockito.anyBoolean())).
       thenReturn(anyLocation);
@@ -178,7 +178,7 @@ public class TestMetaReaderEditorNoCluster {
       // Fix the location lookup so it 'works' though no network. First
       // make an 'any location' object.
       final HRegionLocation anyLocation =
-        new HRegionLocation(HRegionInfo.FIRST_META_REGIONINFO, sn, HConstants.NO_SEQNUM);
+        new HRegionLocation(HRegionInfo.FIRST_META_REGIONINFO, sn);
       // Return the any location object when locateRegion is called in HTable
       // constructor and when its called by ServerCallable (it uses getRegionLocation).
       // The ugly format below comes of 'Important gotcha on spying real objects!' from
@@ -99,7 +99,7 @@ public class HConnectionTestingUtility {
     HConnection c = HConnectionTestingUtility.getMockedConnection(conf);
     Mockito.doNothing().when(c).close();
     // Make it so we return a particular location when asked.
-    final HRegionLocation loc = new HRegionLocation(hri, sn, HConstants.NO_SEQNUM);
+    final HRegionLocation loc = new HRegionLocation(hri, sn);
     Mockito.when(c.getRegionLocation((byte[]) Mockito.any(),
         (byte[]) Mockito.any(), Mockito.anyBoolean())).
       thenReturn(loc);
@@ -424,7 +424,7 @@ public class TestHCM {
     assertNotNull(location);

     HRegionLocation anySource = new HRegionLocation(location.getRegionInfo(), new ServerName(
-        location.getHostname(), location.getPort() - 1, 0L), HConstants.NO_SEQNUM);
+        location.getHostname(), location.getPort() - 1, 0L));

     // Same server as already in cache reporting - overwrites any value despite seqNum.
     int nextPort = location.getPort() + 1;
@@ -271,7 +271,7 @@ public class TestLoadIncrementalHFilesSplitRecovery {
     Mockito.doNothing().when(c).close();
     // Make it so we return a particular location when asked.
     final HRegionLocation loc = new HRegionLocation(HRegionInfo.FIRST_META_REGIONINFO,
-        new ServerName("example.org", 1234, 0), HConstants.NO_SEQNUM);
+        new ServerName("example.org", 1234, 0));
     Mockito.when(c.getRegionLocation((byte[]) Mockito.any(),
         (byte[]) Mockito.any(), Mockito.anyBoolean())).
       thenReturn(loc);
@@ -66,3 +66,5 @@ log4j.logger.org.apache.hadoop.hbase=DEBUG
 #See HBASE-4709
 log4j.org.apache.hadoop.metrics2.impl.MetricsSystemImpl=ERROR
 log4j.org.apache.hadoop.metrics2.util.MBeans=ERROR
+# Enable this to get detailed connection error/retry logging.
+# log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=TRACE
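Note (illustration only, not part of this commit): the commented-out logger above can also be turned on from test or debugging code, assuming the log4j 1.2 API that HBase used at the time.

import org.apache.log4j.Level;
import org.apache.log4j.Logger;

public class EnableConnectionTraceSketch {
  public static void main(String[] args) {
    // Same logger name as the commented-out line in log4j.properties above.
    Logger.getLogger(
        "org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation")
        .setLevel(Level.TRACE);
  }
}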