HDFS-9141. Thread leak in Datanode#refreshVolumes. (Uma Maheswara Rao G via yliu)

yliu 2015-09-29 22:05:34 +08:00
parent d6fa34e014
commit 715dbddf77
2 changed files with 9 additions and 5 deletions

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -1453,6 +1453,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-9092. Nfs silently drops overlapping write requests and causes data
     copying to fail. (Yongjun Zhang)
 
+    HDFS-9141. Thread leak in Datanode#refreshVolumes. (Uma Maheswara Rao G
+    via yliu)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java

@@ -25,9 +25,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISS
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DNS_INTERFACE_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DNS_INTERFACE_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DNS_NAMESERVER_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DNS_NAMESERVER_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY;
@@ -604,7 +602,7 @@ public class DataNode extends ReconfigurableBase
   private synchronized void refreshVolumes(String newVolumes) throws IOException {
     Configuration conf = getConf();
     conf.set(DFS_DATANODE_DATA_DIR_KEY, newVolumes);
+    ExecutorService service = null;
     int numOldDataDirs = dataDirs.size();
     ChangedVolumes changedVolumes = parseChangedVolumes(newVolumes);
     StringBuilder errorMessageBuilder = new StringBuilder();
@@ -627,8 +625,8 @@ public class DataNode extends ReconfigurableBase
       for (BPOfferService bpos : blockPoolManager.getAllNamenodeThreads()) {
         nsInfos.add(bpos.getNamespaceInfo());
       }
-      ExecutorService service = Executors.newFixedThreadPool(
-          changedVolumes.newLocations.size());
+      service = Executors
+          .newFixedThreadPool(changedVolumes.newLocations.size());
       List<Future<IOException>> exceptions = Lists.newArrayList();
       for (final StorageLocation location : changedVolumes.newLocations) {
         exceptions.add(service.submit(new Callable<IOException>() {
@@ -678,6 +676,9 @@ public class DataNode extends ReconfigurableBase
         throw new IOException(errorMessageBuilder.toString());
       }
     } finally {
+      if (service != null) {
+        service.shutdown();
+      }
       conf.set(DFS_DATANODE_DATA_DIR_KEY,
           Joiner.on(",").join(effectiveVolumes));
       dataDirs = getStorageLocations(conf);
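
The essence of the fix: the ExecutorService was previously declared inside the try block, so the finally clause could not reach it, and the fixed thread pool's non-daemon worker threads were never shut down; each reconfiguration therefore leaked threads. The commit hoists the declaration above the try and shuts the pool down in finally. Below is a minimal, self-contained sketch of that pattern, not the actual DataNode code; the class name, the addVolumeTask helper, and the newLocations parameter are hypothetical stand-ins for the per-volume work that refreshVolumes submits.

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class RefreshVolumesSketch {

  // Hypothetical stand-in for the per-volume task that refreshVolumes
  // submits; returning the IOException (instead of throwing) mirrors how
  // the real method collects per-volume failures.
  private static Callable<IOException> addVolumeTask(final String location) {
    return new Callable<IOException>() {
      @Override
      public IOException call() {
        // Pretend to add the volume named by 'location'.
        return null; // null signals success in this sketch
      }
    };
  }

  static void refreshVolumes(List<String> newLocations) throws Exception {
    // Declared before the try block so the finally clause can see it;
    // this hoisting is the core of the HDFS-9141 fix.
    ExecutorService service = null;
    try {
      if (!newLocations.isEmpty()) {
        // Created lazily, only when there are new volumes to add.
        service = Executors.newFixedThreadPool(newLocations.size());
        List<Future<IOException>> results =
            new ArrayList<Future<IOException>>();
        for (String location : newLocations) {
          results.add(service.submit(addVolumeTask(location)));
        }
        for (Future<IOException> result : results) {
          IOException ioe = result.get(); // wait for each volume to finish
          if (ioe != null) {
            System.err.println("Failed to add a volume: " + ioe);
          }
        }
      }
    } finally {
      // The threads of a fixed pool are non-daemon and never time out, so
      // skipping this shutdown leaks them on every call: the bug fixed here.
      if (service != null) {
        service.shutdown();
      }
    }
  }

  public static void main(String[] args) throws Exception {
    refreshVolumes(Arrays.asList("/data/1", "/data/2"));
  }
}

Note that shutdown() (rather than shutdownNow()) lets already-submitted tasks finish while refusing new work, which suffices here because the method collects every task's result before reaching the finally block.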