HDFS-4058. DirectoryScanner may fail with IOOB if the directory scanning threads return out of volume order. Contributed by Eli Collins
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1398617 13f79535-47bb-0310-9956-ffa450edef68
commit 85bd8e0b72
parent b6d441277c
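The IOOB in the title comes from filling the per-volume report list by index: java.util.ArrayList.add(int, E) throws IndexOutOfBoundsException when the index is greater than the current size, so a report that arrives for a later volume before an earlier one cannot be inserted. Below is a minimal standalone sketch of that failure mode and of the array-based alternative the patch switches to; the class and variable names are illustrative only and are not part of the DirectoryScanner code.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class OutOfOrderAddDemo {
  public static void main(String[] args) {
    // ArrayList(int) sets the initial capacity, not the size, so the list starts empty.
    List<String> reports = new ArrayList<String>(3);
    reports.add(0, "report-0");      // fine: index 0 == current size 0
    try {
      // Simulate the scan of volume 2 completing before the scan of volume 1.
      reports.add(2, "report-2");    // index 2 > current size 1
    } catch (IndexOutOfBoundsException e) {
      System.out.println("Out-of-order insert failed: " + e);
    }

    // A fixed-size array indexed by volume position accepts any completion order.
    String[] byVolume = new String[3];
    byVolume[2] = "report-2";
    byVolume[0] = "report-0";
    byVolume[1] = "report-1";
    System.out.println(Arrays.toString(byVolume));
  }
}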
@@ -114,6 +114,9 @@ Release 2.0.3-alpha - Unreleased
 
     HDFS-3678. Edit log files are never being purged from 2NN. (atm)
 
+    HDFS-4058. DirectoryScanner may fail with IOOB if the directory
+    scanning threads return out of volume order. (eli)
+
 Release 2.0.2-alpha - 2012-09-07
 
   INCOMPATIBLE CHANGES
@@ -431,16 +431,16 @@ public class DirectoryScanner implements Runnable {
   private Map<String, ScanInfo[]> getDiskReport() {
     // First get list of data directories
     final List<? extends FsVolumeSpi> volumes = dataset.getVolumes();
-    ArrayList<ScanInfoPerBlockPool> dirReports =
-      new ArrayList<ScanInfoPerBlockPool>(volumes.size());
+    // Use an array since the threads may return out of order and
+    // compilersInProgress#keySet may return out of order as well.
+    ScanInfoPerBlockPool[] dirReports = new ScanInfoPerBlockPool[volumes.size()];
 
     Map<Integer, Future<ScanInfoPerBlockPool>> compilersInProgress =
       new HashMap<Integer, Future<ScanInfoPerBlockPool>>();
 
     for (int i = 0; i < volumes.size(); i++) {
-      if (!isValid(dataset, volumes.get(i))) {
-        // volume is invalid
-        dirReports.add(i, null);
-      } else {
+      if (isValid(dataset, volumes.get(i))) {
         ReportCompiler reportCompiler =
           new ReportCompiler(volumes.get(i));
         Future<ScanInfoPerBlockPool> result =
@@ -452,7 +452,7 @@ public class DirectoryScanner implements Runnable {
     for (Entry<Integer, Future<ScanInfoPerBlockPool>> report :
         compilersInProgress.entrySet()) {
       try {
-        dirReports.add(report.getKey(), report.getValue().get());
+        dirReports[report.getKey()] = report.getValue().get();
       } catch (Exception ex) {
         LOG.error("Error compiling report", ex);
         // Propagate ex to DataBlockScanner to deal with
@@ -465,7 +465,7 @@ public class DirectoryScanner implements Runnable {
     for (int i = 0; i < volumes.size(); i++) {
       if (isValid(dataset, volumes.get(i))) {
         // volume is still valid
-        list.addAll(dirReports.get(i));
+        list.addAll(dirReports[i]);
       }
     }
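Taken together, the pattern after the fix is: submit one report-compilation task per volume, key each Future by the volume's index, and write each result into a fixed-size array slot, so neither the threads' completion order nor the HashMap's iteration order matters. The following is a compact, self-contained sketch of that pattern under hypothetical names (PerVolumeReportDemo and its strings stand in for ReportCompiler and ScanInfoPerBlockPool); it is an illustration of the approach, not the actual DataNode code.

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class PerVolumeReportDemo {
  public static void main(String[] args) throws Exception {
    final int numVolumes = 3;
    ExecutorService pool = Executors.newFixedThreadPool(numVolumes);

    Map<Integer, Future<String>> inProgress = new HashMap<Integer, Future<String>>();
    String[] reports = new String[numVolumes];   // one slot per volume, filled in any order

    for (int i = 0; i < numVolumes; i++) {
      final int volumeIndex = i;
      inProgress.put(volumeIndex, pool.submit(new Callable<String>() {
        public String call() {
          return "report for volume " + volumeIndex;  // stand-in for a directory scan
        }
      }));
    }

    // HashMap iteration order is unspecified; indexing by the key keeps results correct.
    for (Map.Entry<Integer, Future<String>> entry : inProgress.entrySet()) {
      reports[entry.getKey()] = entry.getValue().get();
    }

    for (String report : reports) {
      System.out.println(report);
    }
    pool.shutdown();
  }
}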