HDFS-10225. DataNode hot swap drives should disallow storage type changes. Contributed by Lei (Eddy) Xu.

(cherry picked from commit 132deb4cac)
Xiao Chen 2016-07-21 16:41:02 -07:00
parent 2f1e352912
commit 1a115ea317
2 changed files with 35 additions and 1 deletion

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java

@@ -630,7 +630,7 @@ public class DataNode extends ReconfigurableBase
    * @param newVolumes a comma separated string that specifies the data volumes.
    * @return changed volumes.
    * @throws IOException if none of the directories are specified in the
-   *         configuration.
+   *         configuration, or the storage type of a directory is changed.
    */
   @VisibleForTesting
   ChangedVolumes parseChangedVolumes(String newVolumes) throws IOException {
@@ -642,6 +642,12 @@ public class DataNode extends ReconfigurableBase
       throw new IOException("No directory is specified.");
     }
 
+    // Use the existing StorageLocation to detect storage type changes.
+    Map<String, StorageLocation> existingLocations = new HashMap<>();
+    for (StorageLocation loc : getStorageLocations(this.conf)) {
+      existingLocations.put(loc.getFile().getCanonicalPath(), loc);
+    }
+
     ChangedVolumes results = new ChangedVolumes();
     results.newLocations.addAll(locations);
@@ -655,6 +661,12 @@ public class DataNode extends ReconfigurableBase
         if (location.getFile().getCanonicalPath().equals(
             dir.getRoot().getCanonicalPath())) {
           sl.remove();
+          StorageLocation old = existingLocations.get(
+              location.getFile().getCanonicalPath());
+          if (old != null &&
+              old.getStorageType() != location.getStorageType()) {
+            throw new IOException("Changing storage type is not allowed.");
+          }
           results.unchangedLocations.add(location);
           found = true;
           break;
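
For context, entries in dfs.datanode.data.dir may carry an optional storage-type prefix such as [DISK] or [SSD], and the check added above rejects a hot-swap request that keeps a directory but flips its type. The standalone sketch below is illustrative only and not part of this commit; the class name StorageTypeCheckSketch and the /data/1 path are hypothetical, while the StorageLocation calls mirror the ones used in parseChangedVolumes().

import java.io.IOException;

import org.apache.hadoop.hdfs.server.datanode.StorageLocation;

public class StorageTypeCheckSketch {
  public static void main(String[] args) throws IOException {
    // Parse two configured volumes that point at the same directory but
    // declare different storage types (hypothetical /data/1 path).
    StorageLocation oldLoc = StorageLocation.parse("[DISK]/data/1");
    StorageLocation newLoc = StorageLocation.parse("[SSD]/data/1");

    // Same canonical path, different storage type: this is the combination
    // that the patched parseChangedVolumes() now refuses during hot swap.
    if (oldLoc.getFile().getCanonicalPath().equals(
            newLoc.getFile().getCanonicalPath())
        && oldLoc.getStorageType() != newLoc.getStorageType()) {
      throw new IOException("Changing storage type is not allowed.");
    }
  }
}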

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java

@@ -27,6 +27,7 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.BlockMissingException;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
@@ -255,6 +256,27 @@ public class TestDataNodeHotSwapVolumes {
     }
   }
 
+  @Test
+  public void testParseStorageTypeChanges() throws IOException {
+    startDFSCluster(1, 1);
+    DataNode dn = cluster.getDataNodes().get(0);
+    Configuration conf = dn.getConf();
+    List<StorageLocation> oldLocations = DataNode.getStorageLocations(conf);
+
+    // Change storage type of an existing StorageLocation
+    String newLoc = String.format("[%s]%s", StorageType.SSD,
+        oldLocations.get(1).getUri());
+    String newDataDirs = oldLocations.get(0).toString() + "," + newLoc;
+
+    try {
+      dn.parseChangedVolumes(newDataDirs);
+      fail("should throw IOE because storage type changes.");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains(
+          "Changing storage type is not allowed", e);
+    }
+  }
+
   /** Add volumes to the first DataNode. */
   private void addVolumes(int numNewVolumes)
       throws ReconfigurationException, IOException {
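
As a companion to the regression test above, a sketch of the accepting path could look like the following. It is not part of this commit: the method name testParseSameStorageType is hypothetical, and it assumes the same fixture (startDFSCluster, the cluster field, and the package-private parseChangedVolumes/ChangedVolumes visible to tests in org.apache.hadoop.hdfs.server.datanode) used elsewhere in TestDataNodeHotSwapVolumes.

  @Test
  public void testParseSameStorageType() throws IOException {
    startDFSCluster(1, 1);
    DataNode dn = cluster.getDataNodes().get(0);
    Configuration conf = dn.getConf();
    List<StorageLocation> oldLocations = DataNode.getStorageLocations(conf);

    // Re-submit the existing directories with their storage types untouched;
    // the new check should not fire and every volume stays "unchanged".
    String newDataDirs = conf.get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
    DataNode.ChangedVolumes changed = dn.parseChangedVolumes(newDataDirs);
    assertEquals(oldLocations.size(), changed.unchangedLocations.size());
    assertTrue(changed.newLocations.isEmpty());
  }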