HBASE-1101 NPE in HConnectionManager.processBatchOfRows
git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@731817 13f79535-47bb-0310-9956-ffa450edef68
parent 5f8b9e3317
commit 700b2a782d
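The old processBatchOfRows chained .getRegionInfo().getRegionName() directly onto getRegionLocation(), which throws a NullPointerException when the lookup comes back empty. The patch routes every lookup through a new getRegionLocationForRowWithRetries helper that retries with backoff and throws RetriesExhaustedException instead of ever returning null. Below is a minimal, self-contained sketch of that bounded-retry pattern; the names (retryLookup, maxRetries, pauseMillis) and the use of java.util.function.Supplier are illustrative modern Java, not the HBase 0.19 API.

import java.util.ArrayList;
import java.util.List;
import java.util.function.Supplier;

// Sketch of the bounded-retry lookup pattern introduced by the patch.
// Generic names; not the actual HBase API.
public class RetryLookupSketch {

  static <T> T retryLookup(Supplier<T> lookup, int maxRetries, long pauseMillis) {
    List<Throwable> exceptions = new ArrayList<Throwable>();
    int tries = 0;
    T result = null;
    while (tries < maxRetries) {
      try {
        result = lookup.get();              // may return null or throw
      } catch (RuntimeException e) {
        exceptions.add(e);                  // remember the failure, keep trying
      }
      if (result != null) {
        break;                              // success: stop retrying
      }
      tries++;
      try {
        Thread.sleep(pauseMillis * tries);  // back off before the next attempt
      } catch (InterruptedException e) {
        // ignored, as in the patch: just try again
      }
    }
    if (result == null) {
      // The patch throws RetriesExhaustedException with the collected causes;
      // a RuntimeException stands in for it in this sketch.
      throw new RuntimeException("lookup failed after " + tries + " tries ("
          + exceptions.size() + " exception(s) recorded)");
    }
    return result;
  }

  public static void main(String[] args) {
    // A lookup that returns null twice before succeeding on the third try.
    final int[] calls = {0};
    String region = retryLookup(() -> ++calls[0] < 3 ? null : "region-42", 5, 10L);
    System.out.println(region);             // prints: region-42
  }
}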
@@ -130,6 +130,7 @@ Release 0.19.0 - Unreleased
    HBASE-1107 NPE in HStoreScanner.updateReaders
    HBASE-1083 Will keep scheduling major compactions if last time one ran, we
               didn't.
+   HBASE-1101 NPE in HConnectionManager$TableServers.processBatchOfRows
 
   IMPROVEMENTS
    HBASE-901 Add a limit to key length, check key and value length on client side
@@ -873,31 +873,64 @@ public class HConnectionManager implements HConstants {
       return null;
     }
 
+    private HRegionLocation
+      getRegionLocationForRowWithRetries(byte[] tableName, byte[] rowKey)
+    throws IOException {
+      getMaster();
+      List<Throwable> exceptions = new ArrayList<Throwable>();
+      HRegionLocation location = null;
+      int tries = 0;
+      boolean reload = false;
+      while (tries < numRetries) {
+        try {
+          location = getRegionLocation(tableName, rowKey, reload);
+        } catch (Throwable t) {
+          exceptions.add(t);
+        }
+        if (location != null) {
+          break;
+        }
+        reload = true;
+        tries++;
+        try {
+          Thread.sleep(getPauseTime(tries));
+        } catch (InterruptedException e) {
+          // continue
+        }
+      }
+      if (location == null) {
+        throw new RetriesExhaustedException("Some server",
+          HConstants.EMPTY_BYTE_ARRAY, rowKey, tries, exceptions);
+      }
+      return location;
+    }
+
     public void processBatchOfRows(ArrayList<BatchUpdate> list, byte[] tableName)
     throws IOException {
-      // See HBASE-748 for pseudo code of this method
       if (list.isEmpty()) {
         return;
       }
       boolean retryOnlyOne = false;
+      int tries = 0;
       Collections.sort(list);
       List<BatchUpdate> tempUpdates = new ArrayList<BatchUpdate>();
-      byte [] currentRegion = getRegionLocation(tableName, list.get(0).getRow(),
-        false).getRegionInfo().getRegionName();
+      HRegionLocation location =
+        getRegionLocationForRowWithRetries(tableName, list.get(0).getRow());
+      byte [] currentRegion = location.getRegionInfo().getRegionName();
       byte [] region = currentRegion;
       boolean isLastRow = false;
-      int tries = 0;
       for (int i = 0; i < list.size() && tries < numRetries; i++) {
         BatchUpdate batchUpdate = list.get(i);
         tempUpdates.add(batchUpdate);
         isLastRow = (i + 1) == list.size();
         if (!isLastRow) {
-          region = getRegionLocation(tableName, list.get(i + 1).getRow(), false)
-            .getRegionInfo().getRegionName();
+          location = getRegionLocationForRowWithRetries(tableName,
+            list.get(i+1).getRow());
+          region = location.getRegionInfo().getRegionName();
         }
         if (!Bytes.equals(currentRegion, region) || isLastRow || retryOnlyOne) {
           final BatchUpdate[] updates = tempUpdates.toArray(new BatchUpdate[0]);
-          int index = getRegionServerForWithoutRetries(new ServerCallable<Integer>(
+          int index = getRegionServerWithRetries(new ServerCallable<Integer>(
             this, tableName, batchUpdate.getRow()) {
             public Integer call() throws IOException {
               int i = server.batchUpdates(location.getRegionInfo()
@@ -926,8 +959,9 @@ public class HConnectionManager implements HConstants {
           }
           i = i - updates.length + index;
           retryOnlyOne = true;
-          region = getRegionLocation(tableName, list.get(i + 1).getRow(),
-            true).getRegionInfo().getRegionName();
+          location = getRegionLocationForRowWithRetries(tableName,
+            list.get(i + 1).getRow());
+          region = location.getRegionInfo().getRegionName();
         }
         else {
           retryOnlyOne = false;
@@ -15,13 +15,13 @@ Automatically created by Tomcat JspC.
   </servlet>
 
   <servlet>
-    <servlet-name>org.apache.hadoop.hbase.generated.master.regionhistorian_jsp</servlet-name>
-    <servlet-class>org.apache.hadoop.hbase.generated.master.regionhistorian_jsp</servlet-class>
+    <servlet-name>org.apache.hadoop.hbase.generated.master.table_jsp</servlet-name>
+    <servlet-class>org.apache.hadoop.hbase.generated.master.table_jsp</servlet-class>
   </servlet>
 
   <servlet>
-    <servlet-name>org.apache.hadoop.hbase.generated.master.table_jsp</servlet-name>
-    <servlet-class>org.apache.hadoop.hbase.generated.master.table_jsp</servlet-class>
+    <servlet-name>org.apache.hadoop.hbase.generated.master.regionhistorian_jsp</servlet-name>
+    <servlet-class>org.apache.hadoop.hbase.generated.master.regionhistorian_jsp</servlet-class>
   </servlet>
 
   <servlet-mapping>
@@ -29,15 +29,15 @@ Automatically created by Tomcat JspC.
     <url-pattern>/master.jsp</url-pattern>
   </servlet-mapping>
 
-  <servlet-mapping>
-    <servlet-name>org.apache.hadoop.hbase.generated.master.regionhistorian_jsp</servlet-name>
-    <url-pattern>/regionhistorian.jsp</url-pattern>
-  </servlet-mapping>
 
   <servlet-mapping>
     <servlet-name>org.apache.hadoop.hbase.generated.master.table_jsp</servlet-name>
     <url-pattern>/table.jsp</url-pattern>
   </servlet-mapping>
 
+  <servlet-mapping>
+    <servlet-name>org.apache.hadoop.hbase.generated.master.regionhistorian_jsp</servlet-name>
+    <url-pattern>/regionhistorian.jsp</url-pattern>
+  </servlet-mapping>
 
 </web-app>