From bf2936fd38449130a4d4eea2f81ee81039d779e3 Mon Sep 17 00:00:00 2001
From: Vinayakumar B
Date: Wed, 3 Feb 2016 07:35:01 +0530
Subject: [PATCH] HDFS-9715. Check storage ID uniqueness on datanode startup
 (Contributed by Lei (Eddy) Xu)

(cherry picked from commit 04375756a5ed6e907ee7548469c2c508aebbafb7)
(cherry picked from commit 8577c0b6a2bc7e4966866eac2bd1f2a112d0a7c1)
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt      |  3 ++
 .../fsdataset/impl/FsDatasetImpl.java            | 46 +++++++++++--------
 .../apache/hadoop/hdfs/UpgradeUtilities.java     | 10 ++++
 .../fsdataset/impl/TestFsDatasetImpl.java        | 26 +++++++++++
 .../hdfs/server/namenode/FSImageTestUtil.java    | 33 +++++++++++--
 5 files changed, 95 insertions(+), 23 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 824b869e9b6..6bd2ec90729 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -959,6 +959,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8999. Allow a file to be closed with COMMITTED but not yet COMPLETE
     blocks.  (szetszwo)
 
+    HDFS-9715. Check storage ID uniqueness on datanode startup
+    (Lei (Eddy) Xu via vinayakumarb)
+
   BUG FIXES
 
     HDFS-8091: ACLStatus and XAttributes should be presented to
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 2c692b08037..cad724804cd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -377,6 +377,31 @@ private static List<VolumeFailureInfo> getInitialVolumeFailureInfos(
     return volumeFailureInfos;
   }
 
+  /**
+   * Activate a volume to serve requests.
+   * @throws IOException if the storage UUID already exists.
+   */
+  private synchronized void activateVolume(
+      ReplicaMap replicaMap,
+      Storage.StorageDirectory sd, StorageType storageType,
+      FsVolumeReference ref) throws IOException {
+    DatanodeStorage dnStorage = storageMap.get(sd.getStorageUuid());
+    if (dnStorage != null) {
+      final String errorMsg = String.format(
+          "Found duplicated storage UUID: %s in %s.",
+          sd.getStorageUuid(), sd.getVersionFile());
+      LOG.error(errorMsg);
+      throw new IOException(errorMsg);
+    }
+    volumeMap.addAll(replicaMap);
+    storageMap.put(sd.getStorageUuid(),
+        new DatanodeStorage(sd.getStorageUuid(),
+            DatanodeStorage.State.NORMAL,
+            storageType));
+    asyncDiskService.addVolume(sd.getCurrentDir());
+    volumes.addVolume(ref);
+  }
+
   private void addVolume(Collection<StorageLocation> dataLocations,
       Storage.StorageDirectory sd) throws IOException {
     final File dir = sd.getCurrentDir();
@@ -392,16 +417,7 @@ private void addVolume(Collection<StorageLocation> dataLocations,
     ReplicaMap tempVolumeMap = new ReplicaMap(this);
     fsVolume.getVolumeMap(tempVolumeMap, ramDiskReplicaTracker);
 
-    synchronized (this) {
-      volumeMap.addAll(tempVolumeMap);
-      storageMap.put(sd.getStorageUuid(),
-          new DatanodeStorage(sd.getStorageUuid(),
-              DatanodeStorage.State.NORMAL,
-              storageType));
-      asyncDiskService.addVolume(sd.getCurrentDir());
-      volumes.addVolume(ref);
-    }
-
+    activateVolume(tempVolumeMap, sd, storageType, ref);
     LOG.info("Added volume - " + dir + ", StorageType: " + storageType);
   }
 
@@ -459,15 +475,7 @@ public void addVolume(final StorageLocation location,
     setupAsyncLazyPersistThread(fsVolume);
 
     builder.build();
-    synchronized (this) {
-      volumeMap.addAll(tempVolumeMap);
-      storageMap.put(sd.getStorageUuid(),
-          new DatanodeStorage(sd.getStorageUuid(),
-              DatanodeStorage.State.NORMAL,
-              storageType));
-      asyncDiskService.addVolume(sd.getCurrentDir());
-      volumes.addVolume(ref);
-    }
+    activateVolume(tempVolumeMap, sd, storageType, ref);
     LOG.info("Added volume - " + dir + ", StorageType: " + storageType);
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java
index 4e4ed130cad..527d24a8629 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java
@@ -30,6 +30,7 @@
 import java.net.URI;
 import java.util.Arrays;
 import java.util.Collections;
+import java.util.Properties;
 import java.util.zip.CRC32;
 
 import org.apache.hadoop.conf.Configuration;
@@ -49,6 +50,7 @@
 import org.apache.hadoop.hdfs.server.datanode.DataNodeLayoutVersion;
 import org.apache.hadoop.hdfs.server.datanode.DataStorage;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 
 import com.google.common.base.Preconditions;
@@ -379,6 +381,14 @@ public static File[] createDataNodeStorageDirs(String[] parents,
       localFS.copyToLocalFile(new Path(datanodeStorage.toString(), "current"),
           new Path(newDir.toString()), false);
+      // Change the storage UUID to avoid conflicts when DN starts up.
+      StorageDirectory sd = new StorageDirectory(
+          new File(datanodeStorage.toString()));
+      sd.setStorageUuid(DatanodeStorage.generateUuid());
+      Properties properties = Storage.readPropertiesFile(sd.getVersionFile());
+      properties.setProperty("storageID", sd.getStorageUuid());
+      Storage.writeProperties(sd.getVersionFile(), properties);
+
       retVal[i] = newDir;
     }
     return retVal;
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
index ab7051e9363..ec526d3948f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
@@ -207,6 +207,32 @@ public void testAddVolumes() throws IOException {
     assertTrue(actualVolumes.containsAll(expectedVolumes));
   }
 
+  @Test
+  public void testAddVolumeWithSameStorageUuid() throws IOException {
+    HdfsConfiguration conf = new HdfsConfiguration();
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(1).build();
+    try {
+      cluster.waitActive();
+      assertTrue(cluster.getDataNodes().get(0).isConnectedToNN(
+          cluster.getNameNode().getServiceRpcAddress()));
+
+      MiniDFSCluster.DataNodeProperties dn = cluster.stopDataNode(0);
+      File vol0 = cluster.getStorageDir(0, 0);
+      File vol1 = cluster.getStorageDir(0, 1);
+      Storage.StorageDirectory sd0 = new Storage.StorageDirectory(vol0);
+      Storage.StorageDirectory sd1 = new Storage.StorageDirectory(vol1);
+      FileUtils.copyFile(sd0.getVersionFile(), sd1.getVersionFile());
+
+      cluster.restartDataNode(dn, true);
+      cluster.waitActive();
+      assertFalse(cluster.getDataNodes().get(0).isConnectedToNN(
+          cluster.getNameNode().getServiceRpcAddress()));
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
   @Test(timeout = 30000)
   public void testRemoveVolumes() throws IOException {
     // Feed FsDataset with block metadata.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java
index fafeae2fc90..12b5180c5b3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java
@@ -34,6 +34,8 @@
 import java.util.Collections;
 import java.util.EnumMap;
 import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
@@ -313,21 +315,24 @@ public static void assertParallelFilesAreIdentical(List<File> dirs,
         fileList.add(f);
       }
     }
-    
+
+    Set<String> ignoredProperties = new HashSet<>();
+    ignoredProperties.add("storageID");
     for (List<File> sameNameList : groupedByName.values()) {
       if (sameNameList.get(0).isDirectory()) {
         // recurse
         assertParallelFilesAreIdentical(sameNameList, ignoredFileNames);
       } else {
         if ("VERSION".equals(sameNameList.get(0).getName())) {
-          assertPropertiesFilesSame(sameNameList.toArray(new File[0]));
+          assertPropertiesFilesSame(sameNameList.toArray(new File[0]),
+              ignoredProperties);
         } else {
           assertFileContentsSame(sameNameList.toArray(new File[0]));
         }
       }
     }
   }
-  
+
   /**
    * Assert that a set of properties files all contain the same data.
   * We cannot simply check the md5sums here, since Properties files
@@ -339,6 +344,20 @@ public static void assertParallelFilesAreIdentical(List<File> dirs,
    */
   public static void assertPropertiesFilesSame(File[] propFiles)
       throws IOException {
+    assertPropertiesFilesSame(propFiles, null);
+  }
+
+  /**
+   * Assert that a set of properties files all contain the same data.
+   *
+   * @param propFiles the files to compare.
+   * @param ignoredProperties the property names to be ignored during
+   *     comparison.
+   * @throws IOException if the files cannot be opened or read
+   * @throws AssertionError if the files differ
+   */
+  public static void assertPropertiesFilesSame(
+      File[] propFiles, Set<String> ignoredProperties) throws IOException {
     Set<Entry<Object, Object>> prevProps = null;
 
     for (File f : propFiles) {
@@ -355,7 +374,13 @@ public static void assertPropertiesFilesSame(File[] propFiles)
       } else {
         Set<Entry<Object, Object>> diff =
             Sets.symmetricDifference(prevProps, props.entrySet());
-        if (!diff.isEmpty()) {
+        Iterator<Entry<Object, Object>> it = diff.iterator();
+        while (it.hasNext()) {
+          Entry<Object, Object> entry = it.next();
+          if (ignoredProperties != null &&
+              ignoredProperties.contains(entry.getKey())) {
+            continue;
+          }
           fail("Properties file " + f + " differs from " + propFiles[0]);
         }
       }
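
For readers who only want the gist of the change: the heart of the patch is the duplicate-ID guard in FsDatasetImpl#activateVolume(), which looks the storage UUID up in storageMap before registering the volume and aborts with an IOException if it is already present. The fragment below is a standalone, simplified sketch of that pattern for illustration only; the VolumeRegistry class and its field names are hypothetical and are not part of this patch.

    import java.io.IOException;
    import java.util.HashMap;
    import java.util.Map;

    /**
     * Hypothetical, minimal illustration of the duplicate-storage-ID guard
     * introduced by HDFS-9715; not part of the actual patch.
     */
    public class VolumeRegistry {
      // Maps storage UUID -> volume path, standing in for FsDatasetImpl's storageMap.
      private final Map<String, String> storageMap = new HashMap<>();

      /** Registers a volume, rejecting a storage UUID that is already active. */
      public synchronized void activateVolume(String storageUuid, String volumePath)
          throws IOException {
        if (storageMap.containsKey(storageUuid)) {
          // Mirrors activateVolume(): fail loudly instead of silently
          // overwriting the existing entry for this storage UUID.
          throw new IOException(String.format(
              "Found duplicated storage UUID: %s in %s.", storageUuid, volumePath));
        }
        storageMap.put(storageUuid, volumePath);
      }
    }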