diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 182464badd1..2c90b23e559 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1453,6 +1453,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-9092. Nfs silently drops overlapping write requests and causes data
     copying to fail. (Yongjun Zhang)
 
+    HDFS-9141. Thread leak in Datanode#refreshVolumes. (Uma Maheswara Rao G
+    via yliu)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 264608936b6..2fe67fdaaa9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -25,9 +25,7 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DNS_INTERFACE_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DNS_INTERFACE_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DNS_NAMESERVER_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DNS_NAMESERVER_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY;
@@ -604,7 +602,7 @@ ChangedVolumes parseChangedVolumes(String newVolumes) throws IOException {
   private synchronized void refreshVolumes(String newVolumes) throws IOException {
     Configuration conf = getConf();
     conf.set(DFS_DATANODE_DATA_DIR_KEY, newVolumes);
-
+    ExecutorService service = null;
     int numOldDataDirs = dataDirs.size();
     ChangedVolumes changedVolumes = parseChangedVolumes(newVolumes);
     StringBuilder errorMessageBuilder = new StringBuilder();
@@ -627,8 +625,8 @@ private synchronized void refreshVolumes(String newVolumes) throws IOException {
       for (BPOfferService bpos : blockPoolManager.getAllNamenodeThreads()) {
         nsInfos.add(bpos.getNamespaceInfo());
       }
-      ExecutorService service = Executors.newFixedThreadPool(
-          changedVolumes.newLocations.size());
+      service = Executors
+          .newFixedThreadPool(changedVolumes.newLocations.size());
       List<Future<IOException>> exceptions = Lists.newArrayList();
       for (final StorageLocation location : changedVolumes.newLocations) {
         exceptions.add(service.submit(new Callable<IOException>() {
@@ -678,6 +676,9 @@ public IOException call() {
         throw new IOException(errorMessageBuilder.toString());
      }
     } finally {
+      if (service != null) {
+        service.shutdown();
+      }
       conf.set(DFS_DATANODE_DATA_DIR_KEY,
           Joiner.on(",").join(effectiveVolumes));
       dataDirs = getStorageLocations(conf);
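
The substance of the fix: refreshVolumes previously created a fixed thread pool
inside the try block and never shut it down, so every volume refresh leaked the
pool's worker threads. The patch hoists the service declaration above the try
and shuts the pool down in the finally clause. Below is a minimal,
self-contained sketch of that shutdown-in-finally pattern, not the DataNode
code itself; the class and helper names (VolumeRefresher, addVolume) are
hypothetical, and only the ExecutorService lifecycle mirrors the actual change.

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class VolumeRefresher {

  // Hypothetical stand-in for the DataNode's per-volume work; returns the
  // failure as an IOException, or null on success, like the patched code.
  private static IOException addVolume(String volume) {
    return null;
  }

  static void refreshVolumes(List<String> volumes) throws IOException {
    // Declared before the try block so the finally clause can see it --
    // the same reason the patch moves 'service' out of the try body.
    ExecutorService service = null;
    try {
      service = Executors.newFixedThreadPool(volumes.size());
      List<Future<IOException>> exceptions = new ArrayList<>();
      for (final String volume : volumes) {
        exceptions.add(service.submit(new Callable<IOException>() {
          @Override
          public IOException call() {
            return addVolume(volume);
          }
        }));
      }
      for (Future<IOException> f : exceptions) {
        IOException ioe;
        try {
          ioe = f.get();
        } catch (InterruptedException | ExecutionException e) {
          throw new IOException(e);
        }
        if (ioe != null) {
          throw ioe;
        }
      }
    } finally {
      // The fix itself: without this, each call leaked a pool of idle
      // non-daemon worker threads, since newFixedThreadPool's core threads
      // never time out on their own.
      if (service != null) {
        service.shutdown();
      }
    }
  }

  public static void main(String[] args) throws IOException {
    refreshVolumes(Arrays.asList("/data/1", "/data/2"));
    System.out.println("volumes refreshed, executor shut down");
  }
}

shutdown() lets already-submitted tasks finish while rejecting new ones; since
every task's Future has been drained via get() by the time the finally clause
runs, the pool winds down immediately. The null check matters because the early
paths (no new locations, or an exception before the pool is created) reach the
finally clause with service still null.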