From 5dbb0325df4f95e5f2ab48fc8c627d1b6807eb42 Mon Sep 17 00:00:00 2001
From: Colin Patrick Mccabe
Date: Tue, 20 Jan 2015 20:11:09 -0800
Subject: [PATCH] HDFS-7610. Fix removal of dynamically added DN volumes (Lei
 (Eddy) Xu via Colin P. McCabe)

(cherry picked from commit a17584936cc5141e3f5612ac3ecf35e27968e439)
(cherry picked from commit 7779f38e68ca4e0f7ac08eb7e5f4801b89979d02)

Conflicts:
	hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
	hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java

(cherry picked from commit 65ae3e2ff16ce1114a0115ff916837b0173b77f1)
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt    |  3 ++
 .../fsdataset/impl/FsDatasetImpl.java          | 16 ++++----
 .../datanode/fsdataset/impl/FsVolumeList.java  |  8 +++-
 .../fsdataset/impl/TestFsDatasetImpl.java      | 37 +++++++++++++++++--
 4 files changed, 52 insertions(+), 12 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index d7ff237718c..979632154e8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -88,6 +88,9 @@ Release 2.6.1 - UNRELEASED
     HDFS-7885. Datanode should not trust the generation stamp provided by
     client. (Tsz Wo Nicholas Sze via jing9)
 
+    HDFS-7610. Fix removal of dynamically added DN volumes (Lei (Eddy) Xu via
+    Colin P. McCabe)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 0c2337e6281..cbcf6b87a17 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -336,7 +336,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
     StorageType storageType = location.getStorageType();
 
     final FsVolumeImpl fsVolume = new FsVolumeImpl(
-        this, sd.getStorageUuid(), dir, this.conf, storageType);
+        this, sd.getStorageUuid(), sd.getCurrentDir(), this.conf, storageType);
     final ReplicaMap tempVolumeMap = new ReplicaMap(fsVolume);
 
     ArrayList<IOException> exceptions = Lists.newArrayList();
@@ -379,19 +379,19 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
    */
   @Override
   public synchronized void removeVolumes(Collection<StorageLocation> volumes) {
-    Set<File> volumeSet = new HashSet<File>();
+    Set<String> volumeSet = new HashSet<String>();
     for (StorageLocation sl : volumes) {
-      volumeSet.add(sl.getFile());
+      volumeSet.add(sl.getFile().getAbsolutePath());
     }
     for (int idx = 0; idx < dataStorage.getNumStorageDirs(); idx++) {
       Storage.StorageDirectory sd = dataStorage.getStorageDir(idx);
-      if (volumeSet.contains(sd.getRoot())) {
-        String volume = sd.getRoot().toString();
+      String volume = sd.getRoot().getAbsolutePath();
+      if (volumeSet.contains(volume)) {
         LOG.info("Removing " + volume + " from FsDataset.");
 
         // Disable the volume from the service.
         asyncDiskService.removeVolume(sd.getCurrentDir());
-        this.volumes.removeVolume(volume);
+        this.volumes.removeVolume(sd.getRoot());
 
         // Removed all replica information for the blocks on the volume. Unlike
        // updating the volumeMap in addVolume(), this operation does not scan
@@ -401,7 +401,9 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
         for (Iterator<ReplicaInfo> it = volumeMap.replicas(bpid).iterator();
              it.hasNext(); ) {
           ReplicaInfo block = it.next();
-          if (block.getVolume().getBasePath().equals(volume)) {
+          String absBasePath =
+              new File(block.getVolume().getBasePath()).getAbsolutePath();
+          if (absBasePath.equals(volume)) {
             invalidate(bpid, block);
             blocks.add(block);
             it.remove();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
index 94834449d83..b17b90be121 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
 
+import java.io.File;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -278,13 +279,16 @@ class FsVolumeList {
    * Dynamically remove volume in the list.
    * @param volume the volume to be removed.
    */
-  void removeVolume(String volume) {
+  void removeVolume(File volume) {
     // Make a copy of volumes to remove one volume.
     final FsVolumeImpl[] curVolumes = volumes.get();
     final List<FsVolumeImpl> volumeList = Lists.newArrayList(curVolumes);
     for (Iterator<FsVolumeImpl> it = volumeList.iterator(); it.hasNext(); ) {
       FsVolumeImpl fsVolume = it.next();
-      if (fsVolume.getBasePath().equals(volume)) {
+      String basePath, targetPath;
+      basePath = new File(fsVolume.getBasePath()).getAbsolutePath();
+      targetPath = volume.getAbsolutePath();
+      if (basePath.equals(targetPath)) {
         // Make sure the removed volume is the one in the curVolumes.
         removeVolume(fsVolume);
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
index 71b97484b1c..09934c2a221 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
@@ -47,6 +47,7 @@ import org.mockito.stubbing.Answer;
 import java.io.File;
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
@@ -73,6 +74,7 @@ public class TestFsDatasetImpl {
   private static final String BASE_DIR =
       new FileSystemTestHelper().getTestRootDir();
   private static final int NUM_INIT_VOLUMES = 2;
+  private static final String CLUSTER_ID = "cluster-id";
   private static final String[] BLOCK_POOL_IDS = {"bpid-0", "bpid-1"};
 
   // Use to generate storageUuid
@@ -139,10 +141,11 @@ public class TestFsDatasetImpl {
     Set<String> expectedVolumes = new HashSet<String>();
     List<NamespaceInfo> nsInfos = Lists.newArrayList();
     for (String bpid : BLOCK_POOL_IDS) {
-      nsInfos.add(new NamespaceInfo(0, "cluster-id", bpid, 1));
+      nsInfos.add(new NamespaceInfo(0, CLUSTER_ID, bpid, 1));
     }
     for (int i = 0; i < numNewVolumes; i++) {
       String path = BASE_DIR + "/newData" + i;
+      expectedVolumes.add(path);
       StorageLocation loc = StorageLocation.parse(path);
       Storage.StorageDirectory sd = createStorageDirectory(new File(path));
       DataStorage.VolumeBuilder builder =
@@ -159,7 +162,8 @@ public class TestFsDatasetImpl {
 
     Set<String> actualVolumes = new HashSet<String>();
     for (int i = 0; i < numNewVolumes; i++) {
-      dataset.getVolumes().get(numExistingVolumes + i).getBasePath();
+      actualVolumes.add(
+          dataset.getVolumes().get(numExistingVolumes + i).getBasePath());
     }
     assertEquals(actualVolumes, expectedVolumes);
   }
@@ -242,6 +246,33 @@ public class TestFsDatasetImpl {
         BlockPoolSlice.selectReplicaToDelete(replicaOtherNewer, replica));
   }
 
+  @Test(timeout = 5000)
+  public void testRemoveNewlyAddedVolume() throws IOException {
+    final int numExistingVolumes = dataset.getVolumes().size();
+    List<NamespaceInfo> nsInfos = new ArrayList<NamespaceInfo>();
+    for (String bpid : BLOCK_POOL_IDS) {
+      nsInfos.add(new NamespaceInfo(0, CLUSTER_ID, bpid, 1));
+    }
+    String newVolumePath = BASE_DIR + "/newVolumeToRemoveLater";
+    StorageLocation loc = StorageLocation.parse(newVolumePath);
+
+    Storage.StorageDirectory sd = createStorageDirectory(new File(newVolumePath));
+    DataStorage.VolumeBuilder builder =
+        new DataStorage.VolumeBuilder(storage, sd);
+    when(storage.prepareVolume(eq(datanode), eq(loc.getFile()),
+        anyListOf(NamespaceInfo.class)))
+        .thenReturn(builder);
+
+    dataset.addVolume(loc, nsInfos);
+    assertEquals(numExistingVolumes + 1, dataset.getVolumes().size());
+
+    when(storage.getNumStorageDirs()).thenReturn(numExistingVolumes + 1);
+    when(storage.getStorageDir(numExistingVolumes)).thenReturn(sd);
+    List<StorageLocation> volumesToRemove = Arrays.asList(loc);
+    dataset.removeVolumes(volumesToRemove);
+    assertEquals(numExistingVolumes, dataset.getVolumes().size());
+  }
+
   @Test(timeout = 5000)
   public void testChangeVolumeWithRunningCheckDirs() throws IOException {
     RoundRobinVolumeChoosingPolicy<FsVolumeImpl> blockChooser =
@@ -266,7 +297,7 @@ public class TestFsDatasetImpl {
       @Override
       public Object answer(InvocationOnMock invocationOnMock)
           throws Throwable {
-        volumeList.removeVolume("data4");
+        volumeList.removeVolume(new File("data4"));
         volumeList.addVolume(newVolume);
         return null;
       }
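
Note on the fix (an illustration, not part of the patch): the bug came from
comparing volume paths in mixed forms -- File objects against strings, and
relative paths against absolute ones -- so a dynamically added volume could
fail to match its StorageDirectory at removal time. The sketch below is a
minimal, self-contained demonstration of why the patch normalizes both sides
with getAbsolutePath() before comparing; the class and directory names are
illustrative only and do not come from the Hadoop sources.

    import java.io.File;

    public class PathComparisonDemo {
      public static void main(String[] args) {
        // A volume configured with a relative path ...
        File configured = new File("data4");
        // ... and the same volume recorded elsewhere as an absolute path.
        File recorded = configured.getAbsoluteFile();

        // java.io.File equality is lexical: it compares the stored path
        // strings and never consults the filesystem, so these are unequal
        // even though they name the same directory.
        System.out.println(configured.equals(recorded));   // prints: false

        // Normalizing both sides to absolute paths, as removeVolumes() and
        // FsVolumeList#removeVolume() now do, makes the match reliable.
        System.out.println(configured.getAbsolutePath()
            .equals(recorded.getAbsolutePath()));          // prints: true
      }
    }

Run it from any working directory (javac PathComparisonDemo.java && java
PathComparisonDemo); the relative path resolves against user.dir, which is
exactly the situation the DataNode hits when storage locations are parsed
from configuration in differing forms.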