From 407077024331aef258cde58b0123c7e92dcc1fa9 Mon Sep 17 00:00:00 2001 From: Andrew Purtell Date: Wed, 27 May 2015 13:15:32 -0700 Subject: [PATCH] HBASE-13780 Default to 700 for HDFS root dir permissions for secure deployments (Enis Soztutar) --- .../src/main/resources/hbase-default.xml | 7 ++++++ .../hadoop/hbase/master/MasterFileSystem.java | 22 +++++++++++++++-- src/main/asciidoc/_chapters/security.adoc | 24 +++++++++++++++++-- 3 files changed, 49 insertions(+), 4 deletions(-) diff --git a/hbase-common/src/main/resources/hbase-default.xml b/hbase-common/src/main/resources/hbase-default.xml index 49dd9eb5dcb..9a4baf5da1a 100644 --- a/hbase-common/src/main/resources/hbase-default.xml +++ b/hbase-common/src/main/resources/hbase-default.xml @@ -1188,6 +1188,13 @@ possible configurations would overwhelm and obscure the important. false Use Thrift TCompactProtocol binary serialization protocol. + + hbase.rootdir.perms + 700 + FS permissions for the root directory in a secure (kerberos) setup. + When master starts, it creates the rootdir with these permissions, or sets the permissions + if they do not match. 
+ hbase.data.umask.enable false diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java index de28cdc0d2d..904da845777 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java @@ -35,6 +35,7 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; +import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hbase.ClusterId; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; @@ -46,7 +47,6 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.HFileArchiver; -import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.fs.HFileSystem; import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode; @@ -416,10 +416,18 @@ public class MasterFileSystem { throws IOException { // If FS is in safe mode wait till out of it. FSUtils.waitOnSafeMode(c, c.getInt(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000)); + + boolean isSecurityEnabled = "kerberos".equalsIgnoreCase(c.get("hbase.security.authentication")); + FsPermission rootDirPerms = new FsPermission(c.get("hbase.rootdir.perms", "700")); + // Filesystem is good. Go ahead and check for hbase.rootdir. try { if (!fs.exists(rd)) { - fs.mkdirs(rd); + if (isSecurityEnabled) { + fs.mkdirs(rd, rootDirPerms); + } else { + fs.mkdirs(rd); + } // DFS leaves safe mode with 0 DNs when there are 0 blocks. // We used to handle this by checking the current DN count and waiting until // it is nonzero. 
With security, the check for datanode count doesn't work -- @@ -434,6 +442,16 @@ public class MasterFileSystem { if (!fs.isDirectory(rd)) { throw new IllegalArgumentException(rd.toString() + " is not a directory"); } + if (isSecurityEnabled && !rootDirPerms.equals(fs.getFileStatus(rd).getPermission())) { + // check whether the permission match + LOG.warn("Found rootdir permissions NOT matching expected \"hbase.rootdir.perms\" for " + + "rootdir=" + rd.toString() + " permissions=" + fs.getFileStatus(rd).getPermission() + + " and \"hbase.rootdir.perms\" configured as " + + c.get("hbase.rootdir.perms", "700") + ". Automatically setting the permissions. You" + + " can change the permissions by setting \"hbase.rootdir.perms\" in hbase-site.xml " + + "and restarting the master"); + fs.setPermission(rd, rootDirPerms); + } // as above FSUtils.checkVersion(fs, rd, true, c.getInt(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000), c.getInt(HConstants.VERSION_FILE_WRITE_ATTEMPTS, diff --git a/src/main/asciidoc/_chapters/security.adoc b/src/main/asciidoc/_chapters/security.adoc index 25153a5f2b8..f9a4b8a4e06 100644 --- a/src/main/asciidoc/_chapters/security.adoc +++ b/src/main/asciidoc/_chapters/security.adoc @@ -81,8 +81,8 @@ This describes how to set up Apache HBase and clients for connection to secure H === Prerequisites Hadoop Authentication Configuration:: - To run HBase RPC with strong authentication, you must set `hbase.security.authentication` to `true`. - In this case, you must also set `hadoop.security.authentication` to `true`. + To run HBase RPC with strong authentication, you must set `hbase.security.authentication` to `kerberos`. + In this case, you must also set `hadoop.security.authentication` to `kerberos` in core-site.xml. Otherwise, you would be using strong authentication for HBase but not for the underlying HDFS, which would cancel out any benefit. Kerberos KDC:: @@ -500,6 +500,26 @@ For more information about ACLs, please see the <>). 
HBase sets up the znode ACLs so that only the HBase user and the configured hbase superuser (`hbase.superuser`) can access and modify the data. In cases where ZooKeeper is used for service discovery or sharing state with the client, the znodes created by HBase will also allow anyone (regardless of authentication) to read these znodes (clusterId, master address, meta location, etc), but only the HBase user can modify them. + +=== Securing File System (HDFS) Data +All of the data under management is kept under the root directory in the file system (`hbase.rootdir`). Access to the data and WAL files in the filesystem should be restricted so that users cannot bypass the HBase layer, and peek at the underlying data files from the file system. HBase assumes the filesystem used (HDFS or other) enforces permissions hierarchically. If sufficient protection from the file system (both authorization and authentication) is not provided, HBase level authorization control (ACLs, visibility labels, etc) is meaningless since the user can always access the data from the file system. + +HBase enforces the posix-like permissions 700 (`rwx------`) to its root directory. It means that only the HBase user can read or write the files in FS. The default setting can be changed by configuring `hbase.rootdir.perms` in hbase-site.xml. A restart of the active master is needed so that it changes the used permissions. For versions before 1.2.0, you can check whether HBASE-13780 is committed, and if not, you can manually set the permissions for the root directory if needed. Using HDFS, the command would be: +[source,bash] +---- +sudo -u hdfs hadoop fs -chmod 700 /hbase +---- +You should change `/hbase` if you are using a different `hbase.rootdir`. + +In secure mode, SecureBulkLoadEndpoint should be configured and used for proper handling of user files created from MR jobs to the HBase daemons and HBase user. 
The staging directory in the distributed file system used for bulk load (`hbase.bulkload.staging.dir`, defaults to `/tmp/hbase-staging`) should have permissions (mode 711, or `rwx--x--x`) so that users can access the staging directory created under that parent directory, but cannot do any other operation. See <> for how to configure SecureBulkLoadEndPoint. + == Securing Access To Your Data After you have configured secure authentication between HBase client and server processes and gateways, you need to consider the security of your data itself.