diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 0461766e5b7..a4b969af6d8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1106,6 +1106,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-9092. Nfs silently drops overlapping write requests and causes data
     copying to fail. (Yongjun Zhang)
 
+    HDFS-9141. Thread leak in Datanode#refreshVolumes. (Uma Maheswara Rao G
+    via yliu)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 022989a859b..d1e0160d03d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -25,9 +25,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISS
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DNS_INTERFACE_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DNS_INTERFACE_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DNS_NAMESERVER_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DNS_NAMESERVER_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY;
@@ -611,7 +609,7 @@ public class DataNode extends ReconfigurableBase
   private synchronized void refreshVolumes(String newVolumes) throws IOException {
     Configuration conf = getConf();
     conf.set(DFS_DATANODE_DATA_DIR_KEY, newVolumes);
-
+    ExecutorService service = null;
     int numOldDataDirs = dataDirs.size();
     ChangedVolumes changedVolumes = parseChangedVolumes(newVolumes);
     StringBuilder errorMessageBuilder = new StringBuilder();
@@ -634,8 +632,8 @@ public class DataNode extends ReconfigurableBase
       for (BPOfferService bpos : blockPoolManager.getAllNamenodeThreads()) {
         nsInfos.add(bpos.getNamespaceInfo());
       }
-      ExecutorService service = Executors.newFixedThreadPool(
-          changedVolumes.newLocations.size());
+      service = Executors
+          .newFixedThreadPool(changedVolumes.newLocations.size());
       List<Future<IOException>> exceptions = Lists.newArrayList();
       for (final StorageLocation location : changedVolumes.newLocations) {
         exceptions.add(service.submit(new Callable<IOException>() {
@@ -685,6 +683,9 @@ public class DataNode extends ReconfigurableBase
         throw new IOException(errorMessageBuilder.toString());
       }
     } finally {
+      if (service != null) {
+        service.shutdown();
+      }
       conf.set(DFS_DATANODE_DATA_DIR_KEY,
           Joiner.on(",").join(effectiveVolumes));
       dataDirs = getStorageLocations(conf);
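
For reference, a minimal standalone sketch of the pattern this patch adopts: hoist the
ExecutorService declaration above the try block, create the pool lazily, and shut it down
in finally so its worker threads do not outlive the call when refreshVolumes exits early
(newFixedThreadPool keeps idle threads alive indefinitely, which is the leak fixed here).
The names below (ExecutorShutdownSketch, refreshSketch, the println stand-in for the
volume-add work) are hypothetical, not part of DataNode.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class ExecutorShutdownSketch {

  static void refreshSketch(List<String> newLocations) throws Exception {
    // Declared outside try so the finally block can see it; created lazily,
    // since the pool is only needed when there are new locations to add.
    ExecutorService service = null;
    try {
      if (!newLocations.isEmpty()) {
        service = Executors.newFixedThreadPool(newLocations.size());
        List<Future<IOException>> results = new ArrayList<>();
        for (final String location : newLocations) {
          results.add(service.submit(new Callable<IOException>() {
            @Override
            public IOException call() {
              try {
                // Stand-in for the real per-volume work.
                System.out.println("adding volume " + location);
                return null;
              } catch (RuntimeException e) {
                return new IOException(e);
              }
            }
          }));
        }
        for (Future<IOException> f : results) {
          IOException ioe = f.get();
          if (ioe != null) {
            throw ioe;
          }
        }
      }
    } finally {
      // Without this, every call leaked the pool's worker threads, even when
      // an exception was thrown or no volumes were actually added.
      if (service != null) {
        service.shutdown();
      }
    }
  }

  public static void main(String[] args) throws Exception {
    refreshSketch(List.of("/data/disk1", "/data/disk2"));
  }
}

shutdown() (rather than shutdownNow()) lets already-submitted volume-add tasks finish
while refusing new work, which matches the patch: all results have been collected via
Future.get() by the time the finally block runs.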