diff --git a/src/main/java/org/apache/hadoop/hbase/HConstants.java b/src/main/java/org/apache/hadoop/hbase/HConstants.java
index 92ead028c8d..19be4de9b8e 100644
--- a/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -639,6 +639,11 @@ public final class HConstants {
public static final String CHECKSUM_TYPE_NAME =
"hbase.hstore.checksum.algorithm";
+  /** Enable customized (umask-based) file permissions for hbase data files */
+ public static final String ENABLE_DATA_FILE_UMASK = "hbase.data.umask.enable";
+ /** File permission umask to use when creating hbase data files */
+ public static final String DATA_FILE_UMASK_KEY = "hbase.data.umask";
+
private HConstants() {
// Can't be instantiated with this ctor.
}
diff --git a/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java b/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java
index 9e7e624647b..4acabdbfe99 100644
--- a/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java
+++ b/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java
@@ -30,10 +30,12 @@ import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue.KeyComparator;
import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo;
import org.apache.hadoop.hbase.regionserver.metrics.SchemaConfigured;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.io.RawComparator;
import org.apache.hadoop.io.Writable;
@@ -266,9 +268,8 @@ public abstract class AbstractHFileWriter extends SchemaConfigured
/** A helper method to create HFile output streams in constructors */
protected static FSDataOutputStream createOutputStream(Configuration conf,
FileSystem fs, Path path) throws IOException {
- return fs.create(path, FsPermission.getDefault(), true,
- fs.getConf().getInt("io.file.buffer.size", 4096),
- fs.getDefaultReplication(), fs.getDefaultBlockSize(),
- null);
+ FsPermission perms = FSUtils.getFilePermissions(fs, conf,
+ HConstants.DATA_FILE_UMASK_KEY);
+ return FSUtils.create(fs, path, perms);
}
}
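Note: both call sites this patch touches reduce to the same two-step pattern: look up the (possibly umask-adjusted) permissions, then create the file with them. A minimal usage sketch, assuming a local configuration; the path here is illustrative, not part of the patch:

    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FileSystem.get(conf);
    Path path = new Path("/tmp/example-hfile");  // illustrative only
    // resolves to 777 & ~umask when the feature is enabled, otherwise the fs default
    FsPermission perms = FSUtils.getFilePermissions(fs, conf,
        HConstants.DATA_FILE_UMASK_KEY);
    FSDataOutputStream out = FSUtils.create(fs, path, perms); // overwrites by default
    out.close();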
diff --git a/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 555ffdcb6a0..c0b0fa83f28 100644
--- a/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -67,6 +67,7 @@ import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.DroppedSnapshotException;
import org.apache.hadoop.hbase.HBaseConfiguration;
@@ -769,8 +770,15 @@ public class HRegion implements HeapSize { // , Writable{
// create but before close. If we don't successfully close the file,
// subsequent region reopens will fail the below because create is
// registered in NN.
+
+      // first look up the permissions to use (umask-adjusted or fs default)
+ FsPermission perms = FSUtils.getFilePermissions(fs, conf,
+ HConstants.DATA_FILE_UMASK_KEY);
+
+      // and then create the region info file with those permissions
Path tmpPath = new Path(getTmpDir(), REGIONINFO_FILE);
- FSDataOutputStream out = this.fs.create(tmpPath, true);
+ FSDataOutputStream out = FSUtils.create(fs, tmpPath, perms);
+
try {
this.regionInfo.write(out);
out.write('\n');
diff --git a/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java b/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
index d2d7efe361d..4bfd42f390a 100644
--- a/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
+++ b/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
import org.apache.hadoop.hbase.HRegionInfo;
@@ -61,6 +62,9 @@ import org.apache.hadoop.util.StringUtils;
public abstract class FSUtils {
private static final Log LOG = LogFactory.getLog(FSUtils.class);
+ /** Full access permissions (starting point for a umask) */
+ private static final String FULL_RWX_PERMISSIONS = "777";
+
protected FSUtils() {
super();
}
@@ -105,21 +109,91 @@ public abstract class FSUtils {
}
/**
- * Create file.
- * @param fs filesystem object
- * @param p path to create
- * @return Path
- * @throws IOException e
+   * Create the specified file on the filesystem. By default, this will:
+   * <ol>
+   * <li>overwrite the file if it exists</li>
+   * <li>apply the umask in the configuration (if it is enabled)</li>
+   * <li>use the fs configured buffer size (or 4096 if not set)</li>
+   * <li>use the default replication</li>
+   * <li>use the default block size</li>
+   * <li>not track progress</li>
+   * </ol>
+   *
+   * @param fs {@link FileSystem} on which to write the file
+   * @param path {@link Path} to the file to write
+   * @param perm permissions to set on the created file
+   * @return output stream to the created file
+   * @throws IOException if the file cannot be created
*/
- public static Path create(final FileSystem fs, final Path p)
- throws IOException {
- if (fs.exists(p)) {
- throw new IOException("File already exists " + p.toString());
+ public static FSDataOutputStream create(FileSystem fs, Path path,
+ FsPermission perm) throws IOException {
+ return create(fs, path, perm, true);
+ }
+
+ /**
+   * Create the specified file on the filesystem. By default, this will:
+   * <ol>
+   * <li>apply the umask in the configuration (if it is enabled)</li>
+   * <li>use the fs configured buffer size (or 4096 if not set)</li>
+   * <li>use the default replication</li>
+   * <li>use the default block size</li>
+   * <li>not track progress</li>
+   * </ol>
+   *
+   * @param fs {@link FileSystem} on which to write the file
+   * @param path {@link Path} to the file to write
+   * @param perm permissions to set on the created file
+   * @param overwrite whether to overwrite an existing file at the given path
+   * @return output stream to the created file
+   * @throws IOException if the file cannot be created
+ */
+ public static FSDataOutputStream create(FileSystem fs, Path path,
+ FsPermission perm, boolean overwrite) throws IOException {
+ LOG.debug("Creating file:" + path + "with permission:" + perm);
+
+ return fs.create(path, perm, overwrite,
+ fs.getConf().getInt("io.file.buffer.size", 4096),
+ fs.getDefaultReplication(), fs.getDefaultBlockSize(), null);
+ }
+
+ /**
+ * Get the file permissions specified in the configuration, if they are
+ * enabled.
+ *
+ * @param fs filesystem that the file will be created on.
+ * @param conf configuration to read for determining if permissions are
+ * enabled and which to use
+   * @param permissionConfKey property key in the configuration to use when
+   *          finding the permission
+   * @return the permission to use when creating a new file on the fs. If
+   *         special permissions are not specified in the configuration, then
+   *         the default permissions on the fs will be returned.
+ */
+  public static FsPermission getFilePermissions(final FileSystem fs,
+      final Configuration conf, final String permissionConfKey) {
+ boolean enablePermissions = conf.getBoolean(
+ HConstants.ENABLE_DATA_FILE_UMASK, false);
+
+ if (enablePermissions) {
+ try {
+        FsPermission perm = new FsPermission(FULL_RWX_PERMISSIONS);
+        // make sure a umask was actually configured; if not, use the default
+        String mask = conf.get(permissionConfKey);
+        if (mask == null)
+          return FsPermission.getDefault();
+        // apply the umask
+        FsPermission umask = new FsPermission(mask);
+        return perm.applyUMask(umask);
+ } catch (IllegalArgumentException e) {
+        LOG.warn("Incorrect umask specified in configuration: "
+            + conf.get(permissionConfKey)
+            + ", using default file permissions.", e);
+ return FsPermission.getDefault();
+ }
}
- if (!fs.createNewFile(p)) {
- throw new IOException("Failed create of " + p);
- }
- return p;
+ return FsPermission.getDefault();
}
/**
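Note: the net effect of getFilePermissions is mode = 777 & ~umask. A short sketch against Hadoop's FsPermission API, mirroring the logic above (the 077 umask is only an example value):

    FsPermission full = new FsPermission("777");
    FsPermission umask = new FsPermission("077");
    // 777 & ~077 = 700: owner keeps rwx, group and other get nothing
    FsPermission result = full.applyUMask(umask);
    assert result.equals(new FsPermission("700"));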
diff --git a/src/main/resources/hbase-default.xml b/src/main/resources/hbase-default.xml
index 9277e0c295f..e15841bc3e4 100644
--- a/src/main/resources/hbase-default.xml
+++ b/src/main/resources/hbase-default.xml
@@ -869,4 +869,18 @@
     value to 0.
     </description>
   </property>
+
+  <property>
+    <name>hbase.data.umask.enable</name>
+    <value>false</value>
+    <description>If true, apply the umask configured in hbase.data.umask
+    to the data files written by the regionserver</description>
+  </property>
+
+  <property>
+    <name>hbase.data.umask</name>
+    <value>000</value>
+    <description>Umask applied to the permissions of data files written
+    when hbase.data.umask.enable is true</description>
+  </property>
 </configuration>
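Note: operators would normally set these two properties in hbase-site.xml; the equivalent programmatic form is shown below (the 007 umask is an example, not the default):

    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean(HConstants.ENABLE_DATA_FILE_UMASK, true); // off by default
    conf.set(HConstants.DATA_FILE_UMASK_KEY, "007");          // data files become 770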
diff --git a/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java b/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java
index e2611e607d2..96693906dc9 100644
--- a/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java
+++ b/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java
@@ -19,14 +19,24 @@
*/
package org.apache.hadoop.hbase.util;
+import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
-import org.apache.hadoop.hbase.*;
+import java.io.File;
+import java.util.UUID;
+
+import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HDFSBlocksDistribution;
+import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.Test;
import org.junit.experimental.categories.Category;
@@ -139,13 +149,42 @@ public class TestFSUtils {
FileStatus status = fs.getFileStatus(testFile);
HDFSBlocksDistribution blocksDistribution =
FSUtils.computeHDFSBlocksDistribution(fs, status, 0, status.getLen());
- assertTrue(blocksDistribution.getTopHosts().size() == 3);
+ assertEquals("Wrong number of hosts distributing blocks.", 3,
+ blocksDistribution.getTopHosts().size());
} finally {
htu.shutdownMiniDFSCluster();
}
-
}
-
+
+ @Test
+ public void testPermMask() throws Exception {
+
+ Configuration conf = HBaseConfiguration.create();
+ conf.setBoolean(HConstants.ENABLE_DATA_FILE_UMASK, true);
+ FileSystem fs = FileSystem.get(conf);
+ // first check that we don't crash if we don't have perms set
+ FsPermission defaultPerms = FSUtils.getFilePermissions(fs, conf,
+ HConstants.DATA_FILE_UMASK_KEY);
+ assertEquals(FsPermission.getDefault(), defaultPerms);
+
+    conf.set(HConstants.DATA_FILE_UMASK_KEY, "077");
+ // now check that we get the right perms
+ FsPermission filePerm = FSUtils.getFilePermissions(fs, conf,
+ HConstants.DATA_FILE_UMASK_KEY);
+ assertEquals(new FsPermission("700"), filePerm);
+
+ // then that the correct file is created
+ Path p = new Path("target" + File.separator + UUID.randomUUID().toString());
+ try {
+ FSDataOutputStream out = FSUtils.create(fs, p, filePerm);
+ out.close();
+ FileStatus stat = fs.getFileStatus(p);
+ assertEquals(new FsPermission("700"), stat.getPermission());
+ // and then cleanup
+ } finally {
+ fs.delete(p, true);
+ }
+ }
@org.junit.Rule
public org.apache.hadoop.hbase.ResourceCheckerJUnitRule cu =