HDFS-2470. NN should automatically set permissions on dfs.namenode.*.dir. Contributed by Siddharth Wagle.

(cherry picked from commit a64a43b77fb1032dcb66730a6b6257a24726c256)
(cherry picked from commit 8b12381717)

 Conflicts:
	hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
	hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
This commit is contained in:
Arpit Agarwal 2019-08-26 15:43:52 -07:00 committed by Wei-Chiu Chuang
parent 56caacac1f
commit 8bb98076be
8 changed files with 115 additions and 14 deletions

View File

@ -538,6 +538,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final String DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT = "0.0.0.0:" + DFS_NAMENODE_HTTPS_PORT_DEFAULT;
public static final String DFS_NAMENODE_NAME_DIR_KEY =
HdfsClientConfigKeys.DeprecatedKeys.DFS_NAMENODE_NAME_DIR_KEY;
public static final String DFS_NAMENODE_NAME_DIR_PERMISSION_KEY =
"dfs.namenode.storage.dir.perm";
public static final String DFS_NAMENODE_NAME_DIR_PERMISSION_DEFAULT =
"700";
public static final String DFS_NAMENODE_EDITS_DIR_KEY =
HdfsClientConfigKeys.DeprecatedKeys.DFS_NAMENODE_EDITS_DIR_KEY;
public static final String DFS_NAMENODE_SHARED_EDITS_DIR_KEY = "dfs.namenode.shared.edits.dir";
@ -1029,6 +1033,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final int DFS_JOURNALNODE_RPC_PORT_DEFAULT = 8485;
public static final String DFS_JOURNALNODE_RPC_BIND_HOST_KEY = "dfs.journalnode.rpc-bind-host";
public static final String DFS_JOURNALNODE_RPC_ADDRESS_DEFAULT = "0.0.0.0:" + DFS_JOURNALNODE_RPC_PORT_DEFAULT;
public static final String DFS_JOURNAL_EDITS_DIR_PERMISSION_KEY =
"dfs.journalnode.edits.dir.perm";
public static final String DFS_JOURNAL_EDITS_DIR_PERMISSION_DEFAULT =
"700";
public static final String DFS_JOURNALNODE_HTTP_ADDRESS_KEY = "dfs.journalnode.http-address";
public static final int DFS_JOURNALNODE_HTTP_PORT_DEFAULT = 8480;

View File

@ -26,6 +26,8 @@ import java.util.regex.Pattern;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
@ -65,7 +67,9 @@ class JNStorage extends Storage {
StorageErrorReporter errorReporter) throws IOException {
super(NodeType.JOURNAL_NODE);
sd = new StorageDirectory(logDir);
sd = new StorageDirectory(logDir, null, false, new FsPermission(conf.get(
DFSConfigKeys.DFS_JOURNAL_EDITS_DIR_PERMISSION_KEY,
DFSConfigKeys.DFS_JOURNAL_EDITS_DIR_PERMISSION_DEFAULT)));
this.addStorageDir(sd);
this.fjm = new FileJournalManager(conf, sd, errorReporter);

View File

@ -27,11 +27,14 @@ import java.nio.channels.FileLock;
import java.nio.channels.OverlappingFileLockException;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.attribute.PosixFilePermission;
import java.nio.file.attribute.PosixFilePermissions;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArrayList;
import org.apache.commons.io.FileUtils;
@ -39,6 +42,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
@ -276,6 +280,7 @@ public abstract class Storage extends StorageInfo {
final boolean isShared;
final StorageDirType dirType; // storage dir type
FileLock lock; // storage lock
private final FsPermission permission;
private String storageUuid = null; // Storage directory identifier.
@ -311,6 +316,11 @@ public abstract class Storage extends StorageInfo {
this(dir, dirType, isShared, null);
}
public StorageDirectory(File dir, StorageDirType dirType,
boolean isShared, FsPermission permission) {
this(dir, dirType, isShared, null, permission);
}
/**
* Constructor
* @param dirType storage directory type
@ -320,7 +330,7 @@ public abstract class Storage extends StorageInfo {
*/
public StorageDirectory(StorageDirType dirType, boolean isShared,
StorageLocation location) {
this(getStorageLocationFile(location), dirType, isShared, location);
this(getStorageLocationFile(location), dirType, isShared, location, null);
}
/**
@ -334,7 +344,7 @@ public abstract class Storage extends StorageInfo {
public StorageDirectory(String bpid, StorageDirType dirType,
boolean isShared, StorageLocation location) {
this(getBlockPoolCurrentDir(bpid, location), dirType,
isShared, location);
isShared, location, null);
}
private static File getBlockPoolCurrentDir(String bpid,
@ -348,13 +358,14 @@ public abstract class Storage extends StorageInfo {
}
private StorageDirectory(File dir, StorageDirType dirType,
boolean isShared, StorageLocation location) {
boolean isShared, StorageLocation location, FsPermission permission) {
this.root = dir;
this.lock = null;
// default dirType is UNDEFINED
this.dirType = (dirType == null ? NameNodeDirType.UNDEFINED : dirType);
this.isShared = isShared;
this.location = location;
this.permission = permission;
assert location == null || dir == null ||
dir.getAbsolutePath().startsWith(
new File(location.getUri()).getAbsolutePath()):
@ -432,8 +443,14 @@ public abstract class Storage extends StorageInfo {
if (!(FileUtil.fullyDelete(curDir)))
throw new IOException("Cannot remove current directory: " + curDir);
}
if (!curDir.mkdirs())
if (!curDir.mkdirs()) {
throw new IOException("Cannot create directory " + curDir);
}
if (permission != null) {
Set<PosixFilePermission> permissions =
PosixFilePermissions.fromString(permission.toString());
Files.setPosixFilePermissions(curDir.toPath(), permissions);
}
}
/**
@ -655,8 +672,9 @@ public abstract class Storage extends StorageInfo {
return StorageState.NON_EXISTENT;
}
LOG.info("{} does not exist. Creating ...", rootPath);
if (!root.mkdirs())
if (!root.mkdirs()) {
throw new IOException("Cannot create directory " + rootPath);
}
hadMkdirs = true;
}
// or is inaccessible

View File

@ -298,7 +298,7 @@ public class FSImage implements Closeable {
// triggered.
LOG.info("Storage directory " + sd.getRoot() + " is not formatted.");
LOG.info("Formatting ...");
sd.clearDirectory(); // create empty currrent dir
sd.clearDirectory(); // create empty current dir
// For non-HA, no further action is needed here, as saveNamespace will
// take care of the rest.
if (!target.isHaEnabled()) {

View File

@ -38,6 +38,8 @@ import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
@ -128,7 +130,7 @@ public class NNStorage extends Storage implements Closeable,
private boolean restoreFailedStorage = false;
private final Object restorationLock = new Object();
private boolean disablePreUpgradableLayoutCheck = false;
private final Configuration conf;
/**
* TxId of the last transaction that was included in the most
@ -171,6 +173,7 @@ public class NNStorage extends Storage implements Closeable,
Collection<URI> imageDirs, Collection<URI> editsDirs)
throws IOException {
super(NodeType.NAME_NODE);
this.conf = conf;
// this may modify the editsDirs, so copy before passing in
setStorageDirectories(imageDirs,
@ -312,10 +315,16 @@ public class NNStorage extends Storage implements Closeable,
NameNodeDirType.IMAGE;
// Add to the list of storage directories, only if the
// URI is of type file://
if(dirName.getScheme().compareTo("file") == 0) {
this.addStorageDir(new StorageDirectory(new File(dirName.getPath()),
if (dirName.getScheme().compareTo("file") == 0) {
// Don't lock the dir if it's shared.
StorageDirectory sd = new StorageDirectory(new File(dirName.getPath()),
dirType,
sharedEditsDirs.contains(dirName))); // Don't lock the dir if it's shared.
sharedEditsDirs.contains(dirName),
new FsPermission(conf.get(
DFSConfigKeys.DFS_NAMENODE_NAME_DIR_PERMISSION_KEY,
DFSConfigKeys.DFS_NAMENODE_NAME_DIR_PERMISSION_DEFAULT)));
this.addStorageDir(sd);
}
}
@ -324,9 +333,12 @@ public class NNStorage extends Storage implements Closeable,
checkSchemeConsistency(dirName);
// Add to the list of storage directories, only if the
// URI is of type file://
if(dirName.getScheme().compareTo("file") == 0) {
if (dirName.getScheme().compareTo("file") == 0) {
this.addStorageDir(new StorageDirectory(new File(dirName.getPath()),
NameNodeDirType.EDITS, sharedEditsDirs.contains(dirName)));
NameNodeDirType.EDITS, sharedEditsDirs.contains(dirName),
new FsPermission(conf.get(
DFSConfigKeys.DFS_NAMENODE_NAME_DIR_PERMISSION_KEY,
DFSConfigKeys.DFS_NAMENODE_NAME_DIR_PERMISSION_DEFAULT))));
}
}
}

View File

@ -5131,4 +5131,24 @@
The queue size of BlockReportProcessingThread in BlockManager.
</description>
</property>
<property>
<name>dfs.namenode.storage.dir.perm</name>
<value>700</value>
<description>
Permissions for the directories on the local filesystem where
the DFS namenode stores the fsImage. The permissions can either be
octal or symbolic.
</description>
</property>
<property>
<name>dfs.journalnode.edits.dir.perm</name>
<value>700</value>
<description>
Permissions for the directories on the local filesystem where
the DFS journal node stores the edits. The permissions can either be
octal or symbolic.
</description>
</property>
</configuration>

View File

@ -56,6 +56,9 @@ import java.util.regex.Pattern;
import org.apache.commons.io.FileUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ChecksumException;
import org.apache.hadoop.fs.FileSystem;
@ -1255,6 +1258,17 @@ public class TestEditLog {
Collections.<URI>emptyList(),
editUris);
storage.format(new NamespaceInfo());
// Verify permissions
LocalFileSystem fs = LocalFileSystem.getLocal(getConf());
for (URI uri : editUris) {
String currDir = uri.getPath() + Path.SEPARATOR +
Storage.STORAGE_DIR_CURRENT;
FileStatus fileStatus = fs.getFileLinkStatus(new Path(currDir));
FsPermission permission = fileStatus.getPermission();
assertEquals(getConf().getInt(
DFSConfigKeys.DFS_JOURNAL_EDITS_DIR_PERMISSION_KEY, 700),
permission.toOctal());
}
FSEditLog editlog = getFSEditLog(storage);
// open the edit log and add two transactions
// logGenerationStamp is used, simply because it doesn't

View File

@ -37,9 +37,11 @@ import java.nio.file.Paths;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.stream.Collectors;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
@ -104,7 +106,7 @@ public class TestStartup {
config = new HdfsConfiguration();
hdfsDir = new File(MiniDFSCluster.getBaseDirectory());
if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
if (hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir)) {
throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
}
LOG.info("--hdfsdir is " + hdfsDir.getAbsolutePath());
@ -790,4 +792,27 @@ public class TestStartup {
return;
}
@Test(timeout = 60000)
public void testDirectoryPermissions() throws Exception {
Configuration conf = new Configuration();
try (MiniDFSCluster dfsCluster
= new MiniDFSCluster.Builder(conf).build()) {
dfsCluster.waitActive();
// name and edits
List<StorageDirectory> nameDirs =
dfsCluster.getNameNode().getFSImage().getStorage().getStorageDirs();
Collection<URI> nameDirUris = nameDirs.stream().map(d -> d
.getCurrentDir().toURI()).collect(Collectors.toList());
assertNotNull(nameDirUris);
LocalFileSystem fs = LocalFileSystem.getLocal(config);
FsPermission permission = new FsPermission(conf.get(
DFSConfigKeys.DFS_NAMENODE_NAME_DIR_PERMISSION_KEY,
DFSConfigKeys.DFS_NAMENODE_NAME_DIR_PERMISSION_DEFAULT));
for (URI uri : nameDirUris) {
FileStatus fileStatus = fs.getFileLinkStatus(new Path(uri));
assertEquals(permission.toOctal(),
fileStatus.getPermission().toOctal());
}
}
}
}