HDFS-8046. Allow better control of getContentSummary. Contributed by Kihwal Lee.

(cherry picked from commit 285b31e75e)

(cherry picked from commit 7e622076d4)

Conflicts:
	hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
	hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
	hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java

(cherry picked from commit 1ef5e0b18066ca949adcf4c55a41f186c47e7264)
This commit is contained in:
Kihwal Lee 2015-04-08 15:39:25 -05:00 committed by Vinod Kumar Vavilapalli
parent 31d30e8111
commit 619f793846
4 changed files with 21 additions and 5 deletions

View File

@ -22,6 +22,8 @@ Release 2.6.1 - UNRELEASED
HDFS-7596. NameNode should prune dead storages from storageMap. HDFS-7596. NameNode should prune dead storages from storageMap.
(Arpit Agarwal via cnauroth) (Arpit Agarwal via cnauroth)
HDFS-8046. Allow better control of getContentSummary (kihwal)
OPTIMIZATIONS OPTIMIZATIONS
BUG FIXES BUG FIXES

View File

@ -272,7 +272,9 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final String DFS_LIST_LIMIT = "dfs.ls.limit"; public static final String DFS_LIST_LIMIT = "dfs.ls.limit";
public static final int DFS_LIST_LIMIT_DEFAULT = 1000; public static final int DFS_LIST_LIMIT_DEFAULT = 1000;
public static final String DFS_CONTENT_SUMMARY_LIMIT_KEY = "dfs.content-summary.limit"; public static final String DFS_CONTENT_SUMMARY_LIMIT_KEY = "dfs.content-summary.limit";
public static final int DFS_CONTENT_SUMMARY_LIMIT_DEFAULT = 0; public static final int DFS_CONTENT_SUMMARY_LIMIT_DEFAULT = 5000;
public static final String DFS_CONTENT_SUMMARY_SLEEP_MICROSEC_KEY = "dfs.content-summary.sleep-microsec";
public static final long DFS_CONTENT_SUMMARY_SLEEP_MICROSEC_DEFAULT = 500;
public static final String DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY = "dfs.datanode.failed.volumes.tolerated"; public static final String DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY = "dfs.datanode.failed.volumes.tolerated";
public static final int DFS_DATANODE_FAILED_VOLUMES_TOLERATED_DEFAULT = 0; public static final int DFS_DATANODE_FAILED_VOLUMES_TOLERATED_DEFAULT = 0;
public static final String DFS_DATANODE_SYNCONCLOSE_KEY = "dfs.datanode.synconclose"; public static final String DFS_DATANODE_SYNCONCLOSE_KEY = "dfs.datanode.synconclose";

View File

@ -29,6 +29,8 @@ public class ContentSummaryComputationContext {
private long nextCountLimit = 0; private long nextCountLimit = 0;
private long limitPerRun = 0; private long limitPerRun = 0;
private long yieldCount = 0; private long yieldCount = 0;
private long sleepMilliSec = 0;
private int sleepNanoSec = 0;
/** /**
* Constructor * Constructor
@ -40,17 +42,19 @@ public class ContentSummaryComputationContext {
* no limit (i.e. no yielding) * no limit (i.e. no yielding)
*/ */
public ContentSummaryComputationContext(FSDirectory dir, public ContentSummaryComputationContext(FSDirectory dir,
FSNamesystem fsn, long limitPerRun) { FSNamesystem fsn, long limitPerRun, long sleepMicroSec) {
this.dir = dir; this.dir = dir;
this.fsn = fsn; this.fsn = fsn;
this.limitPerRun = limitPerRun; this.limitPerRun = limitPerRun;
this.nextCountLimit = limitPerRun; this.nextCountLimit = limitPerRun;
this.counts = Content.Counts.newInstance(); this.counts = Content.Counts.newInstance();
this.sleepMilliSec = sleepMicroSec/1000;
this.sleepNanoSec = (int)((sleepMicroSec%1000)*1000);
} }
/** Constructor for blocking computation. */ /** Constructor for blocking computation. */
public ContentSummaryComputationContext() { public ContentSummaryComputationContext() {
this(null, null, 0); this(null, null, 0, 1000);
} }
/** Return current yield count */ /** Return current yield count */
@ -101,7 +105,7 @@ public class ContentSummaryComputationContext {
fsn.readUnlock(); fsn.readUnlock();
try { try {
Thread.sleep(1); Thread.sleep(sleepMilliSec, sleepNanoSec);
} catch (InterruptedException ie) { } catch (InterruptedException ie) {
} finally { } finally {
// reacquire // reacquire

View File

@ -144,6 +144,7 @@ public class FSDirectory implements Closeable {
private final int maxDirItems; private final int maxDirItems;
private final int lsLimit; // max list limit private final int lsLimit; // max list limit
private final int contentCountLimit; // max content summary counts per run private final int contentCountLimit; // max content summary counts per run
private final long contentSleepMicroSec;
private final INodeMap inodeMap; // Synchronized by dirLock private final INodeMap inodeMap; // Synchronized by dirLock
private long yieldCount = 0; // keep track of lock yield count. private long yieldCount = 0; // keep track of lock yield count.
private final int inodeXAttrsLimit; //inode xattrs max limit private final int inodeXAttrsLimit; //inode xattrs max limit
@ -204,6 +205,9 @@ public class FSDirectory implements Closeable {
this.contentCountLimit = conf.getInt( this.contentCountLimit = conf.getInt(
DFSConfigKeys.DFS_CONTENT_SUMMARY_LIMIT_KEY, DFSConfigKeys.DFS_CONTENT_SUMMARY_LIMIT_KEY,
DFSConfigKeys.DFS_CONTENT_SUMMARY_LIMIT_DEFAULT); DFSConfigKeys.DFS_CONTENT_SUMMARY_LIMIT_DEFAULT);
this.contentSleepMicroSec = conf.getLong(
DFSConfigKeys.DFS_CONTENT_SUMMARY_SLEEP_MICROSEC_KEY,
DFSConfigKeys.DFS_CONTENT_SUMMARY_SLEEP_MICROSEC_DEFAULT);
// filesystem limits // filesystem limits
this.maxComponentLength = conf.getInt( this.maxComponentLength = conf.getInt(
@ -252,6 +256,10 @@ public class FSDirectory implements Closeable {
return rootDir; return rootDir;
} }
long getContentSleepMicroSec() {
return contentSleepMicroSec;
}
/** /**
* Shutdown the filestore * Shutdown the filestore
*/ */
@ -2166,7 +2174,7 @@ public class FSDirectory implements Closeable {
ContentSummaryComputationContext cscc = ContentSummaryComputationContext cscc =
new ContentSummaryComputationContext(this, getFSNamesystem(), new ContentSummaryComputationContext(this, getFSNamesystem(),
contentCountLimit); contentCountLimit, contentSleepMicroSec);
ContentSummary cs = targetNode.computeAndConvertContentSummary(cscc); ContentSummary cs = targetNode.computeAndConvertContentSummary(cscc);
yieldCount += cscc.getYieldCount(); yieldCount += cscc.getYieldCount();
return cs; return cs;