diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java index 5d6514ea241..cd8212fec9e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java @@ -625,7 +625,7 @@ static class ChangedVolumes { * @param newVolumes a comma separated string that specifies the data volumes. * @return changed volumes. * @throws IOException if none of the directories are specified in the - * configuration. + * configuration, or the storage type of a directory is changed. */ @VisibleForTesting ChangedVolumes parseChangedVolumes(String newVolumes) throws IOException { @@ -637,6 +637,12 @@ ChangedVolumes parseChangedVolumes(String newVolumes) throws IOException { throw new IOException("No directory is specified."); } + // Use the existing StorageLocation to detect storage type changes. 
+ Map<String, StorageLocation> existingLocations = new HashMap<>(); + for (StorageLocation loc : getStorageLocations(this.conf)) { + existingLocations.put(loc.getFile().getCanonicalPath(), loc); + } + ChangedVolumes results = new ChangedVolumes(); results.newLocations.addAll(locations); @@ -650,6 +656,12 @@ ChangedVolumes parseChangedVolumes(String newVolumes) throws IOException { if (location.getFile().getCanonicalPath().equals( dir.getRoot().getCanonicalPath())) { sl.remove(); + StorageLocation old = existingLocations.get( + location.getFile().getCanonicalPath()); + if (old != null && + old.getStorageType() != location.getStorageType()) { + throw new IOException("Changing storage type is not allowed."); + } results.unchangedLocations.add(location); found = true; break; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java index 0328e109acf..c03b02b0ad6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java @@ -27,6 +27,7 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.hdfs.BlockMissingException; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSTestUtil; @@ -255,6 +256,27 @@ public void testParseChangedVolumesFailures() throws IOException { } } + @Test + public void testParseStorageTypeChanges() throws IOException { + startDFSCluster(1, 1); + DataNode dn = cluster.getDataNodes().get(0); + Configuration conf = dn.getConf(); + List<StorageLocation> oldLocations = DataNode.getStorageLocations(conf); + + // Change storage type of an existing StorageLocation + 
String newLoc = String.format("[%s]%s", StorageType.SSD, + oldLocations.get(1).getUri()); + String newDataDirs = oldLocations.get(0).toString() + "," + newLoc; + + try { + dn.parseChangedVolumes(newDataDirs); + fail("should throw IOE because storage type changes."); + } catch (IOException e) { + GenericTestUtils.assertExceptionContains( + "Changing storage type is not allowed", e); + } + } + /** Add volumes to the first DataNode. */ private void addVolumes(int numNewVolumes) throws ReconfigurationException, IOException {