HDFS-9439. Support reconfiguring fs.protected.directories without NN restart. (Contributed by Xiaobing Zhou)

commit ddfe6774c2 (parent c9307e48b7)
Author: Arpit Agarwal
Date:   2016-03-29 13:55:00 -07:00

4 changed files with 96 additions and 7 deletions

FSDirectory.java

@@ -17,7 +17,10 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import org.apache.hadoop.util.StringUtils;
+
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.protobuf.InvalidProtocolBufferException;
@@ -146,7 +149,7 @@ public class FSDirectory implements Closeable {
   // be deleted unless they are empty.
   //
   // Each entry in this set must be a normalized path.
-  private final SortedSet<String> protectedDirectories;
+  private volatile SortedSet<String> protectedDirectories;
 
   // lock to protect the directory and BlockMap
   private final ReentrantReadWriteLock dirLock;
@@ -370,16 +373,53 @@ public class FSDirectory implements Closeable {
    */
   @VisibleForTesting
   static SortedSet<String> parseProtectedDirectories(Configuration conf) {
+    return parseProtectedDirectories(conf
+        .getTrimmedStringCollection(FS_PROTECTED_DIRECTORIES));
+  }
+
+  /**
+   * Parse configuration setting dfs.namenode.protected.directories to retrieve
+   * the set of protected directories.
+   *
+   * @param protectedDirsString
+   *          a comma separated String representing a bunch of paths.
+   * @return a TreeSet
+   */
+  @VisibleForTesting
+  static SortedSet<String> parseProtectedDirectories(
+      final String protectedDirsString) {
+    return parseProtectedDirectories(StringUtils
+        .getTrimmedStringCollection(protectedDirsString));
+  }
+
+  private static SortedSet<String> parseProtectedDirectories(
+      final Collection<String> protectedDirs) {
     // Normalize each input path to guard against administrator error.
-    return new TreeSet<>(normalizePaths(
-        conf.getTrimmedStringCollection(FS_PROTECTED_DIRECTORIES),
-        FS_PROTECTED_DIRECTORIES));
+    return new TreeSet<>(
+        normalizePaths(protectedDirs, FS_PROTECTED_DIRECTORIES));
   }
 
   SortedSet<String> getProtectedDirectories() {
     return protectedDirectories;
   }
 
+  /**
+   * Set directories that cannot be removed unless empty, even by an
+   * administrator.
+   *
+   * @param protectedDirsString
+   *          comma separated list of protected directories
+   */
+  String setProtectedDirectories(String protectedDirsString) {
+    if (protectedDirsString == null) {
+      protectedDirectories = new TreeSet<>();
+    } else {
+      protectedDirectories = parseProtectedDirectories(protectedDirsString);
+    }
+
+    return Joiner.on(",").skipNulls().join(protectedDirectories);
+  }
+
   BlockManager getBlockManager() {
     return getFSNamesystem().getBlockManager();
   }
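
Note on the field change above: making protectedDirectories volatile instead of final is what allows setProtectedDirectories() to swap in a new set at runtime without locking readers. The setter never mutates the published set; it builds a fresh TreeSet and publishes it with a single reference assignment, so a concurrent reader observes either the old snapshot or the new one, never a half-updated structure. A minimal standalone sketch of this copy-on-swap pattern (the class and method names are illustrative, not HDFS code):

import java.util.Collection;
import java.util.SortedSet;
import java.util.TreeSet;

// Illustrative sketch of the copy-on-swap pattern used by the patch;
// these names are hypothetical and not part of HDFS.
public class ProtectedDirsHolder {
  // volatile: readers always see a fully constructed set, either the
  // old snapshot or the new one.
  private volatile SortedSet<String> protectedDirs = new TreeSet<>();

  public boolean isProtected(String normalizedPath) {
    // Single volatile read; no lock needed on the hot path.
    return protectedDirs.contains(normalizedPath);
  }

  public void replaceAll(Collection<String> newDirs) {
    // Build the replacement off to the side, then publish it with one
    // atomic reference assignment; never mutate the live set in place.
    protectedDirs = new TreeSet<>(newDirs);
  }
}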

NameNode.java

@@ -150,6 +150,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.FS_PROTECTED_DIRECTORIES;
 import static org.apache.hadoop.util.ExitUtil.terminate;
 import static org.apache.hadoop.util.ToolRunner.confirmPrompt;
@@ -273,8 +274,10 @@ public class NameNode extends ReconfigurableBase implements
   /** A list of property that are reconfigurable at runtime. */
   static final List<String> RECONFIGURABLE_PROPERTIES = Collections
-      .unmodifiableList(Arrays.asList(DFS_HEARTBEAT_INTERVAL_KEY,
-          DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY));
+      .unmodifiableList(Arrays
+          .asList(DFS_HEARTBEAT_INTERVAL_KEY,
+              DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
+              FS_PROTECTED_DIRECTORIES));
 
   private static final String USAGE = "Usage: hdfs namenode ["
       + StartupOption.BACKUP.getName() + "] | \n\t["
@@ -2004,6 +2007,8 @@ public class NameNode extends ReconfigurableBase implements
       LOG.info("RECONFIGURE* changed heartbeatRecheckInterval to "
           + datanodeManager.getHeartbeatRecheckInterval());
     }
+    case FS_PROTECTED_DIRECTORIES:
+      return getNamesystem().getFSDirectory().setProtectedDirectories(newVal);
     default:
       break;
     }
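
For context, ReconfigurableBase drives the call above: when an operator edits the configuration and runs hdfs dfsadmin -reconfig namenode <host:ipc_port> start, the base class compares the reloaded configuration against the live one and invokes reconfigurePropertyImpl once per changed key, storing the returned value as the effective setting; a null new value reverts the key to its default. A simplified, self-contained sketch of that contract (all names below are illustrative stand-ins, not the actual Hadoop classes):

import java.util.Map;
import java.util.TreeMap;

// Hypothetical sketch of the reconfiguration contract, not Hadoop code.
public class ReconfigSketch {
  // Stands in for the live Configuration.
  private final Map<String, String> liveConf = new TreeMap<>();

  // Stands in for reconfigurePropertyImpl: apply the change to the
  // running component and return the value that actually took effect.
  protected String applyChange(String property, String newVal) {
    if ("fs.protected.directories".equals(property)) {
      // The NameNode delegates here to FSDirectory, which parses and
      // normalizes the list and returns its canonical comma-separated
      // form; this sketch just trims the raw value.
      return newVal == null ? null : newVal.trim();
    }
    throw new IllegalArgumentException(property + " is not reconfigurable");
  }

  // Stands in for the ReconfigurableBase driver: call the impl for a
  // changed key, then record the effective value.
  public void reconfigure(String property, String newVal) {
    String effective = applyChange(property, newVal);
    if (effective == null) {
      liveConf.remove(property); // revert to default
    } else {
      liveConf.put(property, effective);
    }
  }
}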

TestProtectedDirectories.java

@@ -21,6 +21,7 @@ package org.apache.hadoop.hdfs.server.namenode;
 import com.google.common.base.Joiner;
 import com.google.common.collect.Iterables;
 import com.google.common.collect.Maps;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FileSystem;
@@ -28,6 +29,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.Timeout;
@@ -38,8 +40,10 @@ import java.io.IOException;
 import java.util.*;
 
 import static org.hamcrest.core.Is.is;
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertThat;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_PROTECTED_DIRECTORIES;
 
 /**
  * Verify that the dfs.namenode.protected.directories setting is respected.
@@ -189,6 +193,46 @@ public class TestProtectedDirectories {
     return matrix;
   }
 
+  @Test
+  public void testReconfigureProtectedPaths() throws Throwable {
+    Configuration conf = new HdfsConfiguration();
+    Collection<Path> protectedPaths = Arrays.asList(new Path("/a"), new Path(
+        "/b"), new Path("/c"));
+    Collection<Path> unprotectedPaths = Arrays.asList();
+
+    MiniDFSCluster cluster = setupTestCase(conf, protectedPaths,
+        unprotectedPaths);
+
+    SortedSet<String> protectedPathsNew = new TreeSet<>(
+        FSDirectory.normalizePaths(Arrays.asList("/aa", "/bb", "/cc"),
+            FS_PROTECTED_DIRECTORIES));
+
+    String protectedPathsStrNew = "/aa,/bb,/cc";
+
+    NameNode nn = cluster.getNameNode();
+
+    // change properties
+    nn.reconfigureProperty(FS_PROTECTED_DIRECTORIES, protectedPathsStrNew);
+
+    FSDirectory fsDirectory = nn.getNamesystem().getFSDirectory();
+    // verify change
+    assertEquals(String.format("%s has wrong value", FS_PROTECTED_DIRECTORIES),
+        protectedPathsNew, fsDirectory.getProtectedDirectories());
+
+    assertEquals(String.format("%s has wrong value", FS_PROTECTED_DIRECTORIES),
+        protectedPathsStrNew, nn.getConf().get(FS_PROTECTED_DIRECTORIES));
+
+    // revert to default
+    nn.reconfigureProperty(FS_PROTECTED_DIRECTORIES, null);
+
+    // verify default
+    assertEquals(String.format("%s has wrong value", FS_PROTECTED_DIRECTORIES),
+        new TreeSet<String>(), fsDirectory.getProtectedDirectories());
+
+    assertEquals(String.format("%s has wrong value", FS_PROTECTED_DIRECTORIES),
+        null, nn.getConf().get(FS_PROTECTED_DIRECTORIES));
+  }
+
   @Test
   public void testAll() throws Throwable {
     for (TestMatrixEntry testMatrixEntry : createTestMatrix()) {
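
The new test drives reconfiguration directly through NameNode#reconfigureProperty, while the matrix tests around it cover enforcement. As a reminder of the semantics being reconfigured here: a directory listed in fs.protected.directories can be deleted only once it is empty, and a recursive delete of a non-empty protected directory fails with AccessControlException. A hedged sketch of that behavior against a running cluster (the filesystem handle and the /a path are assumed for illustration, not taken from the patch):

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.AccessControlException;

// Hedged sketch; assumes fs points at a running cluster whose
// fs.protected.directories setting includes /a.
public class ProtectedDeleteDemo {
  static void demo(FileSystem fs) throws Exception {
    Path protectedDir = new Path("/a");
    fs.mkdirs(new Path(protectedDir, "child")); // make /a non-empty
    try {
      fs.delete(protectedDir, true); // recursive delete must fail
      throw new AssertionError("delete of non-empty protected dir succeeded");
    } catch (AccessControlException expected) {
      // NameNode refuses: /a is protected and not empty.
    }
    fs.delete(new Path(protectedDir, "child"), false);
    fs.delete(protectedDir, false); // now empty, so deletion is allowed
  }
}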

TestDFSAdmin.java

@@ -234,7 +234,7 @@ public class TestDFSAdmin {
     final List<String> outs = Lists.newArrayList();
     final List<String> errs = Lists.newArrayList();
     getReconfigurableProperties("namenode", address, outs, errs);
-    assertEquals(3, outs.size());
+    assertEquals(4, outs.size());
     assertEquals(DFS_HEARTBEAT_INTERVAL_KEY, outs.get(1));
     assertEquals(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, outs.get(2));
     assertEquals(errs.size(), 0);
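
This test parses the same listing an operator sees from hdfs dfsadmin -reconfig namenode <host:ipc_port> properties: a header line followed by one property name per line, which is why the expected count grows from 3 to 4. A hypothetical follow-on assertion, not part of the patch, assuming the listing order matches RECONFIGURABLE_PROPERTIES as shown above:

// Hypothetical extra check (not in the patch): the new key would be
// the fourth line, after the header and the two heartbeat keys.
assertEquals(FS_PROTECTED_DIRECTORIES, outs.get(3));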