From db81cc3adddea2c66e178b725622d53497ba729b Mon Sep 17 00:00:00 2001
From: Lei Xu <lei@apache.org>
Date: Thu, 29 Dec 2016 16:57:40 +0800
Subject: [PATCH] HDFS-11267. Avoid redefinition of storageDirs in NNStorage
 and cleanup its accessors in Storage. (Manoj Govindassamy via lei)

(cherry picked from commit a4f66655ec22ca8c960f971f2b0cdafbd3430ad7)

Conflicts:
	hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
---
 .../apache/hadoop/hdfs/server/common/Storage.java  |  8 ++++++--
 .../server/datanode/BlockPoolSliceStorage.java     |  6 +++---
 .../hadoop/hdfs/server/datanode/DataStorage.java   |  8 ++++----
 .../hadoop/hdfs/server/namenode/NNStorage.java     | 14 ++++++--------
 .../hadoop/hdfs/server/common/StorageAdapter.java  |  2 +-
 .../server/datanode/TestBlockPoolSliceStorage.java |  2 +-
 6 files changed, 21 insertions(+), 19 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
index 009d0f03e57..b0fe2900591 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
@@ -124,7 +124,7 @@ public abstract class Storage extends StorageInfo {
   }
 
   protected List<StorageDirectory> storageDirs =
-      new CopyOnWriteArrayList<StorageDirectory>();
+      new CopyOnWriteArrayList<>();
 
   private class DirIterator implements Iterator<StorageDirectory> {
     final StorageDirType dirType;
@@ -879,7 +879,11 @@ public abstract class Storage extends StorageInfo {
   public int getNumStorageDirs() {
     return storageDirs.size();
   }
-  
+
+  public List<StorageDirectory> getStorageDirs() {
+    return storageDirs;
+  }
+
   public StorageDirectory getStorageDir(int idx) {
     return storageDirs.get(idx);
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
index 81fce6f621b..a7edb4b6002 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
@@ -292,7 +292,7 @@ public class BlockPoolSliceStorage extends Storage {
   void remove(File absPathToRemove) {
     Preconditions.checkArgument(absPathToRemove.isAbsolute());
     LOG.info("Removing block level storage: " + absPathToRemove);
-    for (Iterator<StorageDirectory> it = this.storageDirs.iterator();
+    for (Iterator<StorageDirectory> it = getStorageDirs().iterator();
          it.hasNext(); ) {
       StorageDirectory sd = it.next();
       if (sd.getRoot().getAbsoluteFile().equals(absPathToRemove)) {
@@ -772,7 +772,7 @@ public class BlockPoolSliceStorage extends Storage {
    */
   public void clearTrash() {
     final List<File> trashRoots = new ArrayList<>();
-    for (StorageDirectory sd : storageDirs) {
+    for (StorageDirectory sd : getStorageDirs()) {
       File trashRoot = getTrashRootDir(sd);
       if (trashRoot.exists() && sd.getPreviousDir().exists()) {
         LOG.error("Trash and PreviousDir shouldn't both exist for storage "
@@ -810,7 +810,7 @@ public class BlockPoolSliceStorage extends Storage {
   /** trash is enabled if at least one storage directory contains trash root */
   @VisibleForTesting
   public boolean trashEnabled() {
-    for (StorageDirectory sd : storageDirs) {
+    for (StorageDirectory sd : getStorageDirs()) {
       if (getTrashRootDir(sd).exists()) {
         return true;
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
index 0a8f1fef3e2..a1abfcefca2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
@@ -188,11 +188,11 @@ public class DataStorage extends Storage {
   }
 
   public void setRollingUpgradeMarker(String bpid) throws IOException {
-    getBPStorage(bpid).setRollingUpgradeMarkers(storageDirs);
+    getBPStorage(bpid).setRollingUpgradeMarkers(getStorageDirs());
   }
 
   public void clearRollingUpgradeMarker(String bpid) throws IOException {
-    getBPStorage(bpid).clearRollingUpgradeMarkers(storageDirs);
+    getBPStorage(bpid).clearRollingUpgradeMarkers(getStorageDirs());
   }
 
   /**
@@ -505,7 +505,7 @@ public class DataStorage extends Storage {
     }
 
     StringBuilder errorMsgBuilder = new StringBuilder();
-    for (Iterator<StorageDirectory> it = this.storageDirs.iterator();
+    for (Iterator<StorageDirectory> it = getStorageDirs().iterator();
          it.hasNext(); ) {
       StorageDirectory sd = it.next();
       if (dirsToRemove.contains(sd.getRoot())) {
@@ -1027,7 +1027,7 @@ public class DataStorage extends Storage {
     // To handle finalizing a snapshot taken at datanode level while
     // upgrading to federation, if datanode level snapshot previous exists,
     // then finalize it. Else finalize the corresponding BP.
-    for (StorageDirectory sd : storageDirs) {
+    for (StorageDirectory sd : getStorageDirs()) {
       File prevDir = sd.getPreviousDir();
       if (prevDir.exists()) {
         // data node level storage finalize
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
index 6290c431ecd..a8f998c2966 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
@@ -167,8 +167,6 @@ public class NNStorage extends Storage implements Closeable,
       throws IOException {
     super(NodeType.NAME_NODE);
 
-    storageDirs = new CopyOnWriteArrayList<StorageDirectory>();
-
     // this may modify the editsDirs, so copy before passing in
     setStorageDirectories(imageDirs,
         Lists.newArrayList(editsDirs),
@@ -206,7 +204,7 @@ public class NNStorage extends Storage implements Closeable,
   @Override // Closeable
   public void close() throws IOException {
     unlockAll();
-    storageDirs.clear();
+    getStorageDirs().clear();
   }
 
   /**
@@ -291,7 +289,7 @@ public class NNStorage extends Storage implements Closeable,
       Collection<URI> fsEditsDirs,
       Collection<URI> sharedEditsDirs)
       throws IOException {
-    this.storageDirs.clear();
+    getStorageDirs().clear();
     this.removedStorageDirs.clear();
 
     // Add all name dirs with appropriate NameNodeDirType
@@ -863,7 +861,7 @@ public class NNStorage extends Storage implements Closeable,
           + sd.getRoot().getPath(), e);
     }
 
-    if (this.storageDirs.remove(sd)) {
+    if (getStorageDirs().remove(sd)) {
       this.removedStorageDirs.add(sd);
     }
 
@@ -917,7 +915,7 @@ public class NNStorage extends Storage implements Closeable,
     // getCanonicalPath may need to call stat() or readlink() and it's likely
     // those calls would fail due to the same underlying IO problem.
     String absPath = f.getAbsolutePath();
-    for (StorageDirectory sd : storageDirs) {
+    for (StorageDirectory sd : getStorageDirs()) {
       String dirPath = sd.getRoot().getAbsolutePath();
       if (!dirPath.endsWith(File.separator)) {
         dirPath += File.separator;
@@ -1125,14 +1123,14 @@ public class NNStorage extends Storage implements Closeable,
   @Override
   public void writeAll() throws IOException {
     this.layoutVersion = getServiceLayoutVersion();
-    for (StorageDirectory sd : storageDirs) {
+    for (StorageDirectory sd : getStorageDirs()) {
       try {
         writeProperties(sd);
       } catch (Exception e) {
         LOG.warn("Error during write properties to the VERSION file to "
             + sd.toString(), e);
         reportErrorsOnDirectory(sd);
-        if (storageDirs.isEmpty()) {
+        if (getStorageDirs().isEmpty()) {
           throw new IOException("All the storage failed while writing "
               + "properties to VERSION file");
         }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/StorageAdapter.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/StorageAdapter.java
index 3dd81b257f9..c2df9bf9128 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/StorageAdapter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/StorageAdapter.java
@@ -33,7 +33,7 @@ public abstract class StorageAdapter {
       Storage s, int idx) {
 
     StorageDirectory dir = Mockito.spy(s.getStorageDir(idx));
-    s.storageDirs.set(idx, dir);
+    s.getStorageDirs().set(idx, dir);
     return dir;
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolSliceStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolSliceStorage.java
index b5951a0aad2..0371b4c2e55 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolSliceStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolSliceStorage.java
@@ -49,7 +49,7 @@ public class TestBlockPoolSliceStorage {
                            String clusterId) {
       super(namespaceID, bpID, cTime, clusterId);
       addStorageDir(new StorageDirectory(new File("/tmp/dontcare/" + bpID)));
-      assertThat(storageDirs.size(), is(1));
+      assertThat(getStorageDirs().size(), is(1));
     }
   }
 
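Usage note (illustrative, not part of the patch): with the new accessor in
Storage, subclasses and tests reach the storage directory list through
getStorageDirs() instead of touching the storageDirs field directly, as the
hunks above show for BlockPoolSliceStorage, DataStorage, NNStorage, and the
tests. Below is a minimal sketch of a subclass written against the patched
API; DemoStorage and printRoots are hypothetical names invented for this
example, while Storage, StorageDirectory, NodeType, and isPreUpgradableLayout
are the real HDFS types and methods involved.

    import java.io.IOException;

    import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
    import org.apache.hadoop.hdfs.server.common.Storage;

    // Hypothetical Storage subclass, for illustration only.
    class DemoStorage extends Storage {
      DemoStorage() {
        super(NodeType.DATA_NODE);
      }

      @Override // stub; real subclasses perform layout checks here
      public boolean isPreUpgradableLayout(StorageDirectory sd)
          throws IOException {
        return false;
      }

      // After this patch: iterate via the accessor, not the field.
      void printRoots() {
        for (StorageDirectory sd : getStorageDirs()) {  // was: storageDirs
          System.out.println(sd.getRoot().getAbsolutePath());
        }
      }
    }

Since the backing list is a CopyOnWriteArrayList, iterators obtained from
getStorageDirs() are snapshots, so callers can iterate safely even while a
failed directory is being removed on another thread; that is why the accessor
can hand out the live list rather than a defensive copy.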