HADOOP-9241. DU refresh interval is not configurable. Contributed by Harsh J. (harsh)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1439130 13f79535-47bb-0310-9956-ffa450edef68
Harsh J 2013-01-27 17:53:04 +00:00
parent 91c35f4464
commit 1590ccc486
5 changed files with 21 additions and 3 deletions

CHANGES.txt

@@ -132,6 +132,8 @@ Release 2.0.3-alpha - Unreleased
     HADOOP-9231. Parametrize staging URL for the uniformity of
     distributionManagement. (Konstantin Boudnik via suresh)
+    HADOOP-9241. DU refresh interval is not configurable (harsh)
   OPTIMIZATIONS
     HADOOP-8866. SampleQuantiles#query is O(N^2) instead of O(N). (Andrew Wang

CommonConfigurationKeysPublic.java

@@ -54,6 +54,10 @@ public class CommonConfigurationKeysPublic {
   public static final String FS_DF_INTERVAL_KEY = "fs.df.interval";
   /** Default value for FS_DF_INTERVAL_KEY */
   public static final long FS_DF_INTERVAL_DEFAULT = 60000;
+  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  public static final String FS_DU_INTERVAL_KEY = "fs.du.interval";
+  /** Default value for FS_DU_INTERVAL_KEY */
+  public static final long FS_DU_INTERVAL_DEFAULT = 60000;
   //Defaults are not specified for following keys

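For orientation (not part of the patch): the new key/default pair is meant to be read the same way as the existing fs.df.interval constants above. A minimal sketch, assuming only the Configuration API and the constants added in this hunk; the wrapper class DuIntervalLookup and the println are illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;

public class DuIntervalLookup {
  public static void main(String[] args) {
    // core-default.xml supplies 60000 ms; core-site.xml or a programmatic
    // setLong() overrides it, and the compiled-in constant is the fallback
    // if the key is absent everywhere.
    Configuration conf = new Configuration();
    long intervalMs = conf.getLong(
        CommonConfigurationKeysPublic.FS_DU_INTERVAL_KEY,
        CommonConfigurationKeysPublic.FS_DU_INTERVAL_DEFAULT);
    System.out.println("fs.du.interval = " + intervalMs + " ms");
  }
}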
DU.java

@@ -20,6 +20,7 @@ package org.apache.hadoop.fs;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.util.Shell;
 import java.io.BufferedReader;
@@ -64,8 +65,8 @@ public class DU extends Shell {
    * @throws IOException if we fail to refresh the disk usage
    */
   public DU(File path, Configuration conf) throws IOException {
-    this(path, 600000L);
-    //10 minutes default refresh interval
+    this(path, conf.getLong(CommonConfigurationKeys.FS_DU_INTERVAL_KEY,
+        CommonConfigurationKeys.FS_DU_INTERVAL_DEFAULT));
   }
   /**

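A hedged usage sketch of the changed constructor (not part of the commit): DU(File, Configuration), getUsed(), start() and shutdown() are the existing DU API on this branch, while the target directory, the 5-minute override and the wrapper class are hypothetical.

import java.io.File;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.DU;

public class DuUsageSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Hypothetical override: refresh every 5 minutes instead of the new 60 s default.
    conf.setLong(CommonConfigurationKeys.FS_DU_INTERVAL_KEY, 5 * 60 * 1000L);

    // The interval is now taken from conf by the DU(File, Configuration) constructor.
    DU du = new DU(new File("/tmp"), conf);  // "/tmp" is a placeholder directory
    du.start();                              // background thread re-runs 'du' on the interval
    System.out.println("used bytes: " + du.getUsed());
    du.shutdown();
  }
}

Before this patch the Configuration-based constructor ignored conf and always used a hard-coded 10-minute interval; with the change the same call honors fs.du.interval.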
core-default.xml

@@ -431,6 +431,12 @@
   <description>Disk usage statistics refresh interval in msec.</description>
 </property>
+<property>
+  <name>fs.du.interval</name>
+  <value>60000</value>
+  <description>File space usage statistics refresh interval in msec.</description>
+</property>
 <property>
   <name>fs.s3.block.size</name>
   <value>67108864</value>

TestDU.java

@@ -24,6 +24,9 @@ import java.io.IOException;
 import java.io.RandomAccessFile;
 import java.util.Random;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 /** This test makes sure that "DU" does not get to run on each call to getUsed */
 public class TestDU extends TestCase {
   final static private File DU_DIR = new File(
@@ -106,7 +109,9 @@ public class TestDU extends TestCase {
   public void testDUGetUsedWillNotReturnNegative() throws IOException {
     File file = new File(DU_DIR, "data");
     assertTrue(file.createNewFile());
-    DU du = new DU(file, 10000);
+    Configuration conf = new Configuration();
+    conf.setLong(CommonConfigurationKeys.FS_DU_INTERVAL_KEY, 10000L);
+    DU du = new DU(file, conf);
     du.decDfsUsed(Long.MAX_VALUE);
     long duSize = du.getUsed();
     assertTrue(String.valueOf(duSize), duSize >= 0L);