HDFS-8046. Allow better control of getContentSummary. Contributed by Kihwal Lee.

(cherry picked from commit 285b31e75e)
Kihwal Lee 2015-04-08 15:39:25 -05:00
parent de7f9a8bcc
commit 7e622076d4
5 changed files with 21 additions and 5 deletions

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -79,6 +79,8 @@ Release 2.8.0 - UNRELEASED
     HDFS-8085. Move CorruptFileBlockIterator to a new hdfs.client.impl package.
     (szetszwo)

+    HDFS-8046. Allow better control of getContentSummary (kihwal)
+
   OPTIMIZATIONS

     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java

@@ -200,7 +200,9 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_LIST_LIMIT = "dfs.ls.limit";
   public static final int     DFS_LIST_LIMIT_DEFAULT = 1000;
   public static final String  DFS_CONTENT_SUMMARY_LIMIT_KEY = "dfs.content-summary.limit";
-  public static final int     DFS_CONTENT_SUMMARY_LIMIT_DEFAULT = 0;
+  public static final int     DFS_CONTENT_SUMMARY_LIMIT_DEFAULT = 5000;
+  public static final String  DFS_CONTENT_SUMMARY_SLEEP_MICROSEC_KEY = "dfs.content-summary.sleep-microsec";
+  public static final long    DFS_CONTENT_SUMMARY_SLEEP_MICROSEC_DEFAULT = 500;
   public static final String  DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY = "dfs.datanode.failed.volumes.tolerated";
   public static final int     DFS_DATANODE_FAILED_VOLUMES_TOLERATED_DEFAULT = 0;
   public static final String  DFS_DATANODE_SYNCONCLOSE_KEY = "dfs.datanode.synconclose";
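The two keys above are ordinary HDFS configuration properties, so they can be tuned per cluster or per test. A minimal sketch of overriding them programmatically follows; the class name and values are only illustrative, and per the call-site comment in FSDirStatAndListingOp further down, setting dfs.content-summary.limit to 0 keeps the old fully blocking behavior.

    import org.apache.hadoop.conf.Configuration;

    // Illustrative override of the new content-summary throttling keys.
    public class ContentSummaryConfExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Count at most 10,000 inodes per lock hold (default above is 5000; 0 disables yielding).
        conf.setInt("dfs.content-summary.limit", 10000);
        // Sleep 1,000 microseconds (1 ms) each time the locks are released (default above is 500).
        conf.setLong("dfs.content-summary.sleep-microsec", 1000L);
        System.out.println(conf.getInt("dfs.content-summary.limit", 0));
      }
    }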

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java

@@ -32,6 +32,8 @@ public class ContentSummaryComputationContext {
   private long nextCountLimit = 0;
   private long limitPerRun = 0;
   private long yieldCount = 0;
+  private long sleepMilliSec = 0;
+  private int sleepNanoSec = 0;

   /**
    * Constructor
@@ -43,17 +45,19 @@ public class ContentSummaryComputationContext {
    * no limit (i.e. no yielding)
    */
   public ContentSummaryComputationContext(FSDirectory dir,
-      FSNamesystem fsn, long limitPerRun) {
+      FSNamesystem fsn, long limitPerRun, long sleepMicroSec) {
     this.dir = dir;
     this.fsn = fsn;
     this.limitPerRun = limitPerRun;
     this.nextCountLimit = limitPerRun;
     this.counts = new ContentCounts.Builder().build();
+    this.sleepMilliSec = sleepMicroSec/1000;
+    this.sleepNanoSec = (int)((sleepMicroSec%1000)*1000);
   }

   /** Constructor for blocking computation. */
   public ContentSummaryComputationContext(BlockStoragePolicySuite bsps) {
-    this(null, null, 0);
+    this(null, null, 0, 1000);
     this.bsps = bsps;
   }
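The constructor above splits the configured microsecond value into the millisecond/nanosecond pair that Thread.sleep(long, int) expects: the default of 500 microseconds becomes 0 ms plus 500,000 ns, and 2,500 microseconds would become 2 ms plus 500,000 ns. A standalone sketch of that arithmetic follows; the helper class is hypothetical, while the expressions mirror the two assignments in the diff.

    // Hypothetical helper mirroring the microsecond split used by the constructor above.
    public class SleepSplitExample {
      static long millisPart(long sleepMicroSec) {
        return sleepMicroSec / 1000;                   // whole milliseconds
      }
      static int nanosPart(long sleepMicroSec) {
        return (int) ((sleepMicroSec % 1000) * 1000);  // leftover microseconds, in nanoseconds
      }
      public static void main(String[] args) {
        long micros = 500;  // DFS_CONTENT_SUMMARY_SLEEP_MICROSEC_DEFAULT
        System.out.println(millisPart(micros));  // 0
        System.out.println(nanosPart(micros));   // 500000
      }
    }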
@@ -105,7 +109,7 @@ public class ContentSummaryComputationContext {
     fsn.readUnlock();

     try {
-      Thread.sleep(1);
+      Thread.sleep(sleepMilliSec, sleepNanoSec);
     } catch (InterruptedException ie) {
     } finally {
       // reacquire
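The sleep above sits in the lock-yield path: once the per-run count limit is reached, the read lock is released, the thread pauses for the configured interval, and the lock is reacquired before counting continues. A simplified, hypothetical sketch of that unlock-sleep-relock pattern follows; the real method also manages the FSDirectory lock and checks how the locks are currently held, which this hunk does not show.

    import java.util.concurrent.locks.ReentrantReadWriteLock;

    // Simplified sketch; 'lock' stands in for the namesystem read lock used by the real code.
    public class YieldSketch {
      private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
      private final long sleepMilliSec = 0;     // sleepMicroSec / 1000
      private final int sleepNanoSec = 500000;  // (sleepMicroSec % 1000) * 1000

      // Called while holding the read lock.
      void yieldLock() {
        lock.readLock().unlock();  // let waiting writers make progress
        try {
          Thread.sleep(sleepMilliSec, sleepNanoSec);
        } catch (InterruptedException ie) {
          Thread.currentThread().interrupt();
        } finally {
          lock.readLock().lock();  // reacquire before resuming the count
        }
      }
    }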

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java

@@ -496,7 +496,7 @@ class FSDirStatAndListingOp {
       // processed. 0 means disabled. I.e. blocking for the entire duration.
       ContentSummaryComputationContext cscc =
           new ContentSummaryComputationContext(fsd, fsd.getFSNamesystem(),
-              fsd.getContentCountLimit());
+              fsd.getContentCountLimit(), fsd.getContentSleepMicroSec());
       ContentSummary cs = targetNode.computeAndConvertContentSummary(cscc);
       fsd.addYieldCount(cscc.getYieldCount());
       return cs;

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java

@@ -136,6 +136,7 @@ public class FSDirectory implements Closeable {
   private final int maxDirItems;
   private final int lsLimit; // max list limit
   private final int contentCountLimit; // max content summary counts per run
+  private final long contentSleepMicroSec;
   private final INodeMap inodeMap; // Synchronized by dirLock
   private long yieldCount = 0; // keep track of lock yield count.
@@ -264,6 +265,9 @@ public class FSDirectory implements Closeable {
     this.contentCountLimit = conf.getInt(
         DFSConfigKeys.DFS_CONTENT_SUMMARY_LIMIT_KEY,
         DFSConfigKeys.DFS_CONTENT_SUMMARY_LIMIT_DEFAULT);
+    this.contentSleepMicroSec = conf.getLong(
+        DFSConfigKeys.DFS_CONTENT_SUMMARY_SLEEP_MICROSEC_KEY,
+        DFSConfigKeys.DFS_CONTENT_SUMMARY_SLEEP_MICROSEC_DEFAULT);

     // filesystem limits
     this.maxComponentLength = conf.getInt(
@@ -345,6 +349,10 @@ public class FSDirectory implements Closeable {
     return contentCountLimit;
   }

+  long getContentSleepMicroSec() {
+    return contentSleepMicroSec;
+  }
+
   int getInodeXAttrsLimit() {
     return inodeXAttrsLimit;
   }
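With the wiring above, a getContentSummary over a large tree now releases the namesystem lock periodically instead of blocking other operations for the entire traversal. A rough, illustrative estimate of the cost using the defaults introduced in this commit (5,000 counts per lock hold, 500 microseconds of sleep per yield; actual behavior depends on the cluster) follows.

    // Back-of-envelope estimate of the sleep time added by yielding.
    public class YieldEstimate {
      public static void main(String[] args) {
        long inodes = 10_000_000L;   // example tree size (illustrative)
        long limitPerRun = 5000L;    // dfs.content-summary.limit default
        long sleepMicroSec = 500L;   // dfs.content-summary.sleep-microsec default
        long yields = inodes / limitPerRun;                    // ~2,000 lock releases
        double addedSleepMs = yields * sleepMicroSec / 1000.0; // ~1,000 ms of extra sleep
        System.out.printf("yields=%d, added sleep ~%.0f ms%n", yields, addedSleepMs);
      }
    }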