HDFS-9439. Support reconfiguring fs.protected.directories without NN restart. (Contributed by Xiaobing Zhou)
parent 8ee2140706
commit ed15cb9a30
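
Summary: this change lets administrators update fs.protected.directories on a running NameNode instead of editing the configuration and restarting. FSDirectory gains a string-based setter for the protected-directory set (now a volatile field for safe publication), NameNode advertises the key as reconfigurable and dispatches to that setter, and new tests cover the path end to end. A minimal sketch of the programmatic entry point the new test exercises (the class below is illustrative; `nn` would come from a running cluster such as MiniDFSCluster):

```java
import org.apache.hadoop.conf.ReconfigurationException;
import org.apache.hadoop.hdfs.server.namenode.NameNode;

public class ReconfigureProtectedDirsExample {
  // Sketch: hot-swap the protected-directory list on a live NameNode.
  static void swap(NameNode nn) throws ReconfigurationException {
    // Apply a new comma-separated list without restarting the NameNode.
    nn.reconfigureProperty("fs.protected.directories", "/aa,/bb,/cc");

    // A null value reverts the property to its default (no protected dirs).
    nn.reconfigureProperty("fs.protected.directories", null);
  }
}
```

On a live cluster the same facility is reachable through `hdfs dfsadmin -reconfig`; the TestDFSAdmin change at the bottom covers its listing of reconfigurable namenode properties.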
FSDirectory.java
@@ -17,7 +17,10 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import org.apache.hadoop.util.StringUtils;
+
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.protobuf.InvalidProtocolBufferException;
 
@@ -146,7 +149,7 @@ public class FSDirectory implements Closeable {
   // be deleted unless they are empty.
   //
   // Each entry in this set must be a normalized path.
-  private final SortedSet<String> protectedDirectories;
+  private volatile SortedSet<String> protectedDirectories;
 
   // lock to protect the directory and BlockMap
   private final ReentrantReadWriteLock dirLock;
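
Note: switching the field from final to volatile is what makes the hot swap safe. The setter (added below) builds a complete replacement set and publishes it with a single volatile write, so lock-free readers observe either the old set or the new one, never a partially built set. A standalone sketch of the publication pattern (names are illustrative, not from the patch):

```java
import java.util.SortedSet;
import java.util.TreeSet;

class VolatileSetHolder {
  // Read lock-free by many threads; replaced wholesale by the writer.
  private volatile SortedSet<String> dirs = new TreeSet<>();

  // Writer: finish building the new set, then publish it with one write.
  void replace(SortedSet<String> newDirs) {
    dirs = new TreeSet<>(newDirs);   // defensive copy, then publish
  }

  // Reader: a single volatile read; the returned set is never mutated.
  boolean contains(String path) {
    return dirs.contains(path);
  }
}
```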
@@ -370,16 +373,53 @@ public class FSDirectory implements Closeable {
    */
   @VisibleForTesting
   static SortedSet<String> parseProtectedDirectories(Configuration conf) {
+    return parseProtectedDirectories(conf
+        .getTrimmedStringCollection(FS_PROTECTED_DIRECTORIES));
+  }
+
+  /**
+   * Parse configuration setting dfs.namenode.protected.directories to retrieve
+   * the set of protected directories.
+   *
+   * @param protectedDirsString
+   *          a comma separated String representing a bunch of paths.
+   * @return a TreeSet
+   */
+  @VisibleForTesting
+  static SortedSet<String> parseProtectedDirectories(
+      final String protectedDirsString) {
+    return parseProtectedDirectories(StringUtils
+        .getTrimmedStringCollection(protectedDirsString));
+  }
+
+  private static SortedSet<String> parseProtectedDirectories(
+      final Collection<String> protectedDirs) {
     // Normalize each input path to guard against administrator error.
-    return new TreeSet<>(normalizePaths(
-        conf.getTrimmedStringCollection(FS_PROTECTED_DIRECTORIES),
-        FS_PROTECTED_DIRECTORIES));
+    return new TreeSet<>(
+        normalizePaths(protectedDirs, FS_PROTECTED_DIRECTORIES));
   }
 
   SortedSet<String> getProtectedDirectories() {
     return protectedDirectories;
   }
 
+  /**
+   * Set directories that cannot be removed unless empty, even by an
+   * administrator.
+   *
+   * @param protectedDirsString
+   *          comma separated list of protected directories
+   */
+  String setProtectedDirectories(String protectedDirsString) {
+    if (protectedDirsString == null) {
+      protectedDirectories = new TreeSet<>();
+    } else {
+      protectedDirectories = parseProtectedDirectories(protectedDirsString);
+    }
+
+    return Joiner.on(",").skipNulls().join(protectedDirectories);
+  }
+
   BlockManager getBlockManager() {
     return getFSNamesystem().getBlockManager();
   }
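
Note: the new String overload is trim, split, normalize, and dedupe into a sorted set, with null meaning "no protected directories". An illustrative, JDK-only re-implementation of that observable behavior (the real code delegates to StringUtils.getTrimmedStringCollection and FSDirectory.normalizePaths):

```java
import java.util.SortedSet;
import java.util.TreeSet;

public class ParseProtectedDirsSketch {
  // Illustrative only; not the actual FSDirectory code.
  static SortedSet<String> parse(String protectedDirsString) {
    SortedSet<String> dirs = new TreeSet<>();
    if (protectedDirsString == null) {
      return dirs;                 // null config -> empty set
    }
    for (String entry : protectedDirsString.split(",")) {
      String p = entry.trim();
      if (p.isEmpty()) {
        continue;                  // ignore empty entries such as ",,"
      }
      while (p.length() > 1 && p.endsWith("/")) {
        p = p.substring(0, p.length() - 1);   // normalize trailing slashes
      }
      dirs.add(p);
    }
    return dirs;
  }

  public static void main(String[] args) {
    // Prints [/a, /b]: whitespace trimmed, slash normalized, duplicate merged.
    System.out.println(parse(" /a , /b/, /a"));
  }
}
```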
NameNode.java
@@ -149,6 +149,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.FS_PROTECTED_DIRECTORIES;
 import static org.apache.hadoop.util.ExitUtil.terminate;
 import static org.apache.hadoop.util.ToolRunner.confirmPrompt;
 
@@ -272,8 +273,10 @@ public class NameNode extends ReconfigurableBase implements
 
   /** A list of property that are reconfigurable at runtime. */
   static final List<String> RECONFIGURABLE_PROPERTIES = Collections
-      .unmodifiableList(Arrays.asList(DFS_HEARTBEAT_INTERVAL_KEY,
-          DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY));
+      .unmodifiableList(Arrays
+          .asList(DFS_HEARTBEAT_INTERVAL_KEY,
+              DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
+              FS_PROTECTED_DIRECTORIES));
 
   private static final String USAGE = "Usage: hdfs namenode ["
       + StartupOption.BACKUP.getName() + "] | \n\t["
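
Note: ReconfigurableBase rejects any property not advertised in this list before the NameNode's reconfigurePropertyImpl is invoked (the real check lives in org.apache.hadoop.conf.ReconfigurableBase and throws ReconfigurationException). A simplified, self-contained sketch of that gate, using the three string keys behind the constants above:

```java
import java.util.Arrays;
import java.util.List;

public class ReconfigGateSketch {
  static final List<String> RECONFIGURABLE_PROPERTIES = Arrays.asList(
      "dfs.heartbeat.interval",
      "dfs.namenode.heartbeat.recheck-interval",
      "fs.protected.directories");

  // Simplified stand-in for the framework's reconfigurability check.
  static void checkReconfigurable(String property) {
    if (!RECONFIGURABLE_PROPERTIES.contains(property)) {
      throw new IllegalArgumentException(
          "Property " + property + " is not reconfigurable");
    }
  }

  public static void main(String[] args) {
    checkReconfigurable("fs.protected.directories");  // passes after this patch
    checkReconfigurable("dfs.replication");           // throws
  }
}
```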
@@ -2011,6 +2014,8 @@ public class NameNode extends ReconfigurableBase implements
       LOG.info("RECONFIGURE* changed heartbeatRecheckInterval to "
           + datanodeManager.getHeartbeatRecheckInterval());
     }
+    case FS_PROTECTED_DIRECTORIES:
+      return getNamesystem().getFSDirectory().setProtectedDirectories(newVal);
     default:
       break;
     }
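
Note: the handler returns the effective value (the joined, normalized list from setProtectedDirectories) so the framework can record what was actually applied; a null newVal reverts the property to its default, leaving the stored set empty, which the new test below asserts for both the FSDirectory state and the NameNode configuration.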
TestProtectedDirectories.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hdfs.server.namenode;
 import com.google.common.base.Joiner;
 import com.google.common.collect.Iterables;
 import com.google.common.collect.Maps;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FileSystem;
@@ -28,6 +29,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.Timeout;
@@ -38,8 +40,10 @@ import java.io.IOException;
 import java.util.*;
 
 import static org.hamcrest.core.Is.is;
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertThat;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_PROTECTED_DIRECTORIES;
 
 /**
  * Verify that the dfs.namenode.protected.directories setting is respected.
@@ -189,6 +193,46 @@ public class TestProtectedDirectories {
     return matrix;
   }
 
+  @Test
+  public void testReconfigureProtectedPaths() throws Throwable {
+    Configuration conf = new HdfsConfiguration();
+    Collection<Path> protectedPaths = Arrays.asList(new Path("/a"), new Path(
+        "/b"), new Path("/c"));
+    Collection<Path> unprotectedPaths = Arrays.asList();
+
+    MiniDFSCluster cluster = setupTestCase(conf, protectedPaths,
+        unprotectedPaths);
+
+    SortedSet<String> protectedPathsNew = new TreeSet<>(
+        FSDirectory.normalizePaths(Arrays.asList("/aa", "/bb", "/cc"),
+            FS_PROTECTED_DIRECTORIES));
+
+    String protectedPathsStrNew = "/aa,/bb,/cc";
+
+    NameNode nn = cluster.getNameNode();
+
+    // change properties
+    nn.reconfigureProperty(FS_PROTECTED_DIRECTORIES, protectedPathsStrNew);
+
+    FSDirectory fsDirectory = nn.getNamesystem().getFSDirectory();
+    // verify change
+    assertEquals(String.format("%s has wrong value", FS_PROTECTED_DIRECTORIES),
+        protectedPathsNew, fsDirectory.getProtectedDirectories());
+
+    assertEquals(String.format("%s has wrong value", FS_PROTECTED_DIRECTORIES),
+        protectedPathsStrNew, nn.getConf().get(FS_PROTECTED_DIRECTORIES));
+
+    // revert to default
+    nn.reconfigureProperty(FS_PROTECTED_DIRECTORIES, null);
+
+    // verify default
+    assertEquals(String.format("%s has wrong value", FS_PROTECTED_DIRECTORIES),
+        new TreeSet<String>(), fsDirectory.getProtectedDirectories());
+
+    assertEquals(String.format("%s has wrong value", FS_PROTECTED_DIRECTORIES),
+        null, nn.getConf().get(FS_PROTECTED_DIRECTORIES));
+  }
+
   @Test
   public void testAll() throws Throwable {
     for (TestMatrixEntry testMatrixEntry : createTestMatrix()) {
TestDFSAdmin.java
@@ -234,7 +234,7 @@ public class TestDFSAdmin {
     final List<String> outs = Lists.newArrayList();
     final List<String> errs = Lists.newArrayList();
     getReconfigurableProperties("namenode", address, outs, errs);
-    assertEquals(3, outs.size());
+    assertEquals(4, outs.size());
     assertEquals(DFS_HEARTBEAT_INTERVAL_KEY, outs.get(1));
     assertEquals(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, outs.get(2));
     assertEquals(errs.size(), 0);
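
Note: the expected count grows from 3 to 4 (the listing appears to include a banner line at index 0 plus, now, three property names). The asserts on indices 1 and 2 keep passing because the listing appears sorted and fs.protected.directories orders after both dfs.* keys, landing at index 3.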