diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecureBulkLoadUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecureBulkLoadUtil.java
index 2fde925511e..04bfbb501d3 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecureBulkLoadUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecureBulkLoadUtil.java
@@ -37,6 +37,6 @@ public class SecureBulkLoadUtil {
   }
 
   public static Path getBaseStagingDir(Configuration conf) {
-    return new Path(conf.get(BULKLOAD_STAGING_DIR, "/tmp/hbase-staging"));
+    return new Path(conf.get(BULKLOAD_STAGING_DIR));
   }
 }
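
Note, illustrative only and not part of the patch: with the hard-coded "/tmp/hbase-staging" fallback gone, getBaseStagingDir() resolves entirely from configuration; the default now comes from the new hbase.bulkload.staging.dir entry in hbase-default.xml (next hunk). A minimal sketch of how a client could resolve or override the staging directory, assuming an HBase client classpath; the override path is hypothetical:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.security.SecureBulkLoadUtil;

    public class StagingDirResolution {
      public static void main(String[] args) {
        // hbase-default.xml now supplies hbase.bulkload.staging.dir=${hbase.fs.tmp.dir},
        // so no in-code fallback is required.
        Configuration conf = HBaseConfiguration.create();
        // Optional site-level override; the path is hypothetical.
        conf.set("hbase.bulkload.staging.dir", "/user/hbase/staging");
        Path staging = SecureBulkLoadUtil.getBaseStagingDir(conf);
        System.out.println("bulk load staging dir: " + staging);
      }
    }
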
diff --git a/hbase-common/src/main/resources/hbase-default.xml b/hbase-common/src/main/resources/hbase-default.xml
index 86a5104613d..2d05e96f1ea 100644
--- a/hbase-common/src/main/resources/hbase-default.xml
+++ b/hbase-common/src/main/resources/hbase-default.xml
@@ -62,6 +62,20 @@ possible configurations would overwhelm and obscure the important.
     so change this configuration or else all data will be lost on
     machine restart.</description>
   </property>
+  <property>
+    <name>hbase.fs.tmp.dir</name>
+    <value>/user/${user.name}/hbase-staging</value>
+    <description>A staging directory in default file system (HDFS)
+    for keeping temporary data.
+    </description>
+  </property>
+  <property>
+    <name>hbase.bulkload.staging.dir</name>
+    <value>${hbase.fs.tmp.dir}</value>
+    <description>A staging directory in default file system (HDFS)
+    for bulk loading.
+    </description>
+  </property>
   <property>
     <name>hbase.cluster.distributed</name>
     <value>false</value>
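
For reference, not part of the patch: Hadoop's Configuration expands ${...} references when a value is read, which is how hbase.bulkload.staging.dir inherits whatever hbase.fs.tmp.dir resolves to. A small sketch of that expansion, with a hypothetical override value:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class StagingDirExpansion {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Hypothetical override of the parent property.
        conf.set("hbase.fs.tmp.dir", "/user/alice/hbase-staging");
        // get() expands ${hbase.fs.tmp.dir}, so this prints the overridden value
        // as long as hbase.bulkload.staging.dir keeps its default of ${hbase.fs.tmp.dir}.
        System.out.println(conf.get("hbase.bulkload.staging.dir"));
      }
    }
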
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
index e533bf2642a..678b7bbb61b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
@@ -588,7 +588,7 @@ public class HFileOutputFormat2
     Configuration conf = job.getConfiguration();
     // create the partitions file
     FileSystem fs = FileSystem.get(conf);
-    Path partitionsPath = new Path(conf.get("hadoop.tmp.dir"), "partitions_" + UUID.randomUUID());
+    Path partitionsPath = new Path(conf.get("hbase.fs.tmp.dir"), "partitions_" + UUID.randomUUID());
     fs.makeQualified(partitionsPath);
     writePartitions(conf, partitionsPath, splitPoints);
     fs.deleteOnExit(partitionsPath);
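
Illustrative sketch, not part of the patch: since the partitions_<uuid> file is now written under hbase.fs.tmp.dir on the job's default filesystem rather than hadoop.tmp.dir, a job that prepares HFiles can point that property at a directory the submitting user owns, much as the test changes below do. The directory value is hypothetical and the configureIncrementalLoad() call is elided:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.mapreduce.Job;

    public class PartitionsDirSetup {
      public static Job newBulkLoadJob() throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // configurePartitioner() will place partitions_<uuid> under this directory.
        conf.set("hbase.fs.tmp.dir",
            "/user/" + System.getProperty("user.name") + "/hbase-staging");
        // HFileOutputFormat2.configureIncrementalLoad(job, table, regionLocator)
        // would follow in a real job; it is omitted to keep the sketch self-contained.
        return Job.getInstance(conf, "bulk-load-prepare");
      }
    }
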
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java
index 5e278f89606..f3a5c4cd488 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java
@@ -333,7 +333,9 @@ public class TestHFileOutputFormat {
 
   @Test
   public void testJobConfiguration() throws Exception {
-    Job job = new Job(util.getConfiguration());
+    Configuration conf = new Configuration(this.util.getConfiguration());
+    conf.set("hbase.fs.tmp.dir", util.getDataTestDir("testJobConfiguration").toString());
+    Job job = new Job(conf);
     job.setWorkingDirectory(util.getDataTestDir("testJobConfiguration"));
     HTableDescriptor tableDescriptor = Mockito.mock(HTableDescriptor.class);
     RegionLocator regionLocator = Mockito.mock(RegionLocator.class);
@@ -820,6 +822,7 @@ public class TestHFileOutputFormat {
     // We turn off the sequence file compression, because DefaultCodec
     // pollutes the GZip codec pool with an incompatible compressor.
     conf.set("io.seqfile.compression.type", "NONE");
+    conf.set("hbase.fs.tmp.dir", dir.toString());
     Job job = new Job(conf, "testLocalMRIncrementalLoad");
     job.setWorkingDirectory(util.getDataTestDirOnTestFS("testColumnFamilySettings"));
     setupRandomGeneratorMapper(job);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
index d593b25ec26..d34cfc142db 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
@@ -337,7 +337,9 @@ public class TestHFileOutputFormat2 {
 
   @Test
   public void testJobConfiguration() throws Exception {
-    Job job = new Job(util.getConfiguration());
+    Configuration conf = new Configuration(this.util.getConfiguration());
+    conf.set("hbase.fs.tmp.dir", util.getDataTestDir("testJobConfiguration").toString());
+    Job job = new Job(conf);
     job.setWorkingDirectory(util.getDataTestDir("testJobConfiguration"));
     RegionLocator regionLocator = Mockito.mock(RegionLocator.class);
     setupMockStartKeys(regionLocator);
@@ -822,6 +824,7 @@ public class TestHFileOutputFormat2 {
     // We turn off the sequence file compression, because DefaultCodec
     // pollutes the GZip codec pool with an incompatible compressor.
     conf.set("io.seqfile.compression.type", "NONE");
+    conf.set("hbase.fs.tmp.dir", dir.toString());
     Job job = new Job(conf, "testLocalMRIncrementalLoad");
     job.setWorkingDirectory(util.getDataTestDirOnTestFS("testColumnFamilySettings"));
     setupRandomGeneratorMapper(job);