From ed15cb9a307e8084682c6b248d1528e7d9dd7978 Mon Sep 17 00:00:00 2001
From: Arpit Agarwal
Date: Tue, 29 Mar 2016 13:55:00 -0700
Subject: [PATCH] HDFS-9439. Support reconfiguring fs.protected.directories
 without NN restart. (Contributed by Xiaobing Zhou)

---
 .../hdfs/server/namenode/FSDirectory.java          | 48 +++++++++++++++++--
 .../hadoop/hdfs/server/namenode/NameNode.java      |  9 +++-
 .../namenode/TestProtectedDirectories.java         | 44 +++++++++++++++++
 .../hadoop/hdfs/tools/TestDFSAdmin.java            |  2 +-
 4 files changed, 96 insertions(+), 7 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index f0031c52729..f1400208175 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -17,7 +17,10 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import org.apache.hadoop.util.StringUtils;
+
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.protobuf.InvalidProtocolBufferException;
 
@@ -146,7 +149,7 @@ public class FSDirectory implements Closeable {
   // be deleted unless they are empty.
   //
   // Each entry in this set must be a normalized path.
-  private final SortedSet<String> protectedDirectories;
+  private volatile SortedSet<String> protectedDirectories;
 
   // lock to protect the directory and BlockMap
   private final ReentrantReadWriteLock dirLock;
@@ -370,16 +373,53 @@ public class FSDirectory implements Closeable {
    */
   @VisibleForTesting
   static SortedSet<String> parseProtectedDirectories(Configuration conf) {
+    return parseProtectedDirectories(conf
+        .getTrimmedStringCollection(FS_PROTECTED_DIRECTORIES));
+  }
+
+  /**
+   * Parse the configuration setting fs.protected.directories to retrieve
+   * the set of protected directories.
+   *
+   * @param protectedDirsString
+   *          a comma-separated String of protected directory paths.
+   * @return a TreeSet of the normalized paths
+   */
+  @VisibleForTesting
+  static SortedSet<String> parseProtectedDirectories(
+      final String protectedDirsString) {
+    return parseProtectedDirectories(StringUtils
+        .getTrimmedStringCollection(protectedDirsString));
+  }
+
+  private static SortedSet<String> parseProtectedDirectories(
+      final Collection<String> protectedDirs) {
     // Normalize each input path to guard against administrator error.
-    return new TreeSet<>(normalizePaths(
-        conf.getTrimmedStringCollection(FS_PROTECTED_DIRECTORIES),
-        FS_PROTECTED_DIRECTORIES));
+    return new TreeSet<>(
+        normalizePaths(protectedDirs, FS_PROTECTED_DIRECTORIES));
   }
 
   SortedSet<String> getProtectedDirectories() {
     return protectedDirectories;
   }
 
+  /**
+   * Set directories that cannot be removed unless empty, even by an
+   * administrator.
+   *
+   * @param protectedDirsString
+   *          comma-separated list of protected directories
+   */
+  String setProtectedDirectories(String protectedDirsString) {
+    if (protectedDirsString == null) {
+      protectedDirectories = new TreeSet<>();
+    } else {
+      protectedDirectories = parseProtectedDirectories(protectedDirsString);
+    }
+
+    return Joiner.on(",").skipNulls().join(protectedDirectories);
+  }
+
   BlockManager getBlockManager() {
     return getFSNamesystem().getBlockManager();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index 8a87a1d8efb..0e6cb9024d0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -149,6 +149,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.FS_PROTECTED_DIRECTORIES;
 import static org.apache.hadoop.util.ExitUtil.terminate;
 import static org.apache.hadoop.util.ToolRunner.confirmPrompt;
 
@@ -272,8 +273,10 @@ public class NameNode extends ReconfigurableBase implements
   /** A list of property that are reconfigurable at runtime. */
   static final List<String> RECONFIGURABLE_PROPERTIES = Collections
-      .unmodifiableList(Arrays.asList(DFS_HEARTBEAT_INTERVAL_KEY,
-          DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY));
+      .unmodifiableList(Arrays
+          .asList(DFS_HEARTBEAT_INTERVAL_KEY,
+              DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
+              FS_PROTECTED_DIRECTORIES));
 
   private static final String USAGE = "Usage: hdfs namenode ["
       + StartupOption.BACKUP.getName() + "] | \n\t["
@@ -2011,6 +2014,8 @@ public class NameNode extends ReconfigurableBase implements
       LOG.info("RECONFIGURE* changed heartbeatRecheckInterval to "
           + datanodeManager.getHeartbeatRecheckInterval());
     }
+    case FS_PROTECTED_DIRECTORIES:
+      return getNamesystem().getFSDirectory().setProtectedDirectories(newVal);
     default:
       break;
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProtectedDirectories.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProtectedDirectories.java
index be7b6866757..e7d2d6e180a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProtectedDirectories.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProtectedDirectories.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hdfs.server.namenode;
 import com.google.common.base.Joiner;
 import com.google.common.collect.Iterables;
 import com.google.common.collect.Maps;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FileSystem;
@@ -28,6 +29,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.Timeout;
@@ -38,8 +40,10 @@ import java.io.IOException;
 import java.util.*;
 
 import static org.hamcrest.core.Is.is;
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertThat;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_PROTECTED_DIRECTORIES;
 
 /**
  * Verify that the dfs.namenode.protected.directories setting is respected.
@@ -189,6 +193,46 @@ public class TestProtectedDirectories {
     return matrix;
   }
 
+  @Test
+  public void testReconfigureProtectedPaths() throws Throwable {
+    Configuration conf = new HdfsConfiguration();
+    Collection<Path> protectedPaths = Arrays.asList(new Path("/a"), new Path(
+        "/b"), new Path("/c"));
+    Collection<Path> unprotectedPaths = Arrays.asList();
+
+    MiniDFSCluster cluster = setupTestCase(conf, protectedPaths,
+        unprotectedPaths);
+
+    SortedSet<String> protectedPathsNew = new TreeSet<>(
+        FSDirectory.normalizePaths(Arrays.asList("/aa", "/bb", "/cc"),
+            FS_PROTECTED_DIRECTORIES));
+
+    String protectedPathsStrNew = "/aa,/bb,/cc";
+
+    NameNode nn = cluster.getNameNode();
+
+    // change properties
+    nn.reconfigureProperty(FS_PROTECTED_DIRECTORIES, protectedPathsStrNew);
+
+    FSDirectory fsDirectory = nn.getNamesystem().getFSDirectory();
+    // verify change
+    assertEquals(String.format("%s has wrong value", FS_PROTECTED_DIRECTORIES),
+        protectedPathsNew, fsDirectory.getProtectedDirectories());
+
+    assertEquals(String.format("%s has wrong value", FS_PROTECTED_DIRECTORIES),
+        protectedPathsStrNew, nn.getConf().get(FS_PROTECTED_DIRECTORIES));
+
+    // revert to default
+    nn.reconfigureProperty(FS_PROTECTED_DIRECTORIES, null);
+
+    // verify default
+    assertEquals(String.format("%s has wrong value", FS_PROTECTED_DIRECTORIES),
+        new TreeSet<String>(), fsDirectory.getProtectedDirectories());
+
+    assertEquals(String.format("%s has wrong value", FS_PROTECTED_DIRECTORIES),
+        null, nn.getConf().get(FS_PROTECTED_DIRECTORIES));
+  }
+
   @Test
   public void testAll() throws Throwable {
     for (TestMatrixEntry testMatrixEntry : createTestMatrix()) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
index 81f93aa99b9..3ca7fec54dc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
@@ -234,7 +234,7 @@ public class TestDFSAdmin {
     final List<String> outs = Lists.newArrayList();
     final List<String> errs = Lists.newArrayList();
     getReconfigurableProperties("namenode", address, outs, errs);
-    assertEquals(3, outs.size());
+    assertEquals(4, outs.size());
     assertEquals(DFS_HEARTBEAT_INTERVAL_KEY, outs.get(1));
     assertEquals(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, outs.get(2));
     assertEquals(errs.size(), 0);
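
--
Reviewer note (not part of the patch): a sketch of how an administrator would
exercise this change once applied, using the existing dfsadmin reconfiguration
framework that already drives the two heartbeat properties above. The command
shapes are the framework's standard ones; <host:ipc_port> is a placeholder for
the NameNode's RPC address.

  # Update fs.protected.directories in the NameNode's configuration files,
  # then ask the NameNode to pick up the change without a restart:
  hdfs dfsadmin -reconfig namenode <host:ipc_port> start

  # Poll until the reconfiguration task reports completion:
  hdfs dfsadmin -reconfig namenode <host:ipc_port> status

  # List the properties reconfigurable at runtime; with this patch the list
  # should also include fs.protected.directories:
  hdfs dfsadmin -reconfig namenode <host:ipc_port> properties

Design note: setProtectedDirectories() publishes a freshly parsed TreeSet
through the now-volatile protectedDirectories field rather than mutating the
existing set in place, so concurrent readers observe either the complete old
set or the complete new set without needing the directory lock.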