HDFS-10534. NameNode WebUI should display DataNode usage histogram. Contributed by Kai Sasaki.

Zhe Zhang 2017-01-26 10:34:00 -08:00
parent ab62484240
commit 03a67e1e62
7 changed files with 84 additions and 1 deletion


@@ -67,6 +67,9 @@ Release 2.7.4 - UNRELEASED
    HDFS-10966. Enhance Dispatcher logic on deciding when to give up a source
    DataNode (Mark Wagner and zhz via kihwal)

    HDFS-10534. NameNode WebUI should display DataNode usage histogram.
    (Kai Sasaki via zhz)

  OPTIMIZATIONS

    HDFS-10896. Move lock logging logic from FSNamesystem into FSNamesystemLock.


@@ -393,6 +393,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
            <exclude>src/main/webapps/static/dust-full-2.0.0.min.js</exclude>
            <exclude>src/main/webapps/static/dust-helpers-1.1.1.min.js</exclude>
            <exclude>src/main/webapps/static/jquery-1.10.2.min.js</exclude>
            <exclude>src/main/webapps/static/d3-v4.1.1.min.js</exclude>
          </excludes>
        </configuration>
      </plugin>


@@ -255,6 +255,8 @@
<script type="text/x-dust-template" id="tmpl-datanode">
  <div class="page-header"><h1>Datanode Information</h1></div>
  <div class="page-header"><h1><small>Datanode usage histogram</small></h1></div>
  <small><div id="datanode-usage-histogram"></div></small>
  <div class="page-header"><h1><small>In operation</small></h1></div>
  <small>
  <table class="table">
@@ -396,6 +398,7 @@ There are no reported volume failures.
</script><script type="text/javascript" src="/static/dust-full-2.0.0.min.js">
</script><script type="text/javascript" src="/static/dust-helpers-1.1.1.min.js">
</script><script type="text/javascript" src="/static/dfs-dust.js">
</script><script type="text/javascript" src="/static/d3-v4.1.1.min.js">
</script><script type="text/javascript" src="dfshealth.js">
</script>
</body>


@@ -191,9 +191,68 @@
      $('#tab-datanode').html(out);
      $('#ui-tabs a[href="#tab-datanode"]').tab('show');
    });
    renderHistogram(data);
  })).error(ajax_error_handler);
}

function renderHistogram(dnData) {
  // Convert each live DataNode into its disk usage as a percentage of capacity.
  var data = dnData.LiveNodes.map(function(dn) {
    return (dn.usedSpace / dn.capacity) * 100.0;
  });

  var formatCount = d3.format(",.0f");

  var widthCap = $("div.container").width();
  var heightCap = 150;

  var margin = {top: 10, right: 60, bottom: 30, left: 30},
      width = widthCap * 0.9,
      height = heightCap - margin.top - margin.bottom;

  // x axis: usage percentage, fixed to the range 0-100.
  var x = d3.scaleLinear()
      .domain([0.0, 100.0])
      .range([0, width]);

  // Bucket the per-node usage percentages into ~5%-wide bins.
  var bins = d3.histogram()
      .domain(x.domain())
      .thresholds(x.ticks(20))
      (data);

  // y axis: number of DataNodes that fall into each bin.
  var y = d3.scaleLinear()
      .domain([0, d3.max(bins, function(d) { return d.length; })])
      .range([height, 0]);

  var svg = d3.select("#datanode-usage-histogram").append("svg")
      .attr("width", width + 50.0)
      .attr("height", height + margin.top + margin.bottom)
      .append("g")
      .attr("transform", "translate(" + margin.left + "," + margin.top + ")");

  svg.append("text")
      .attr("x", (width / 2))
      .attr("y", heightCap - 6 - (margin.top / 2))
      .attr("text-anchor", "middle")
      .style("font-size", "15px")
      .text("Disk usage of each DataNode (%)");

  // One group per bin, positioned at the bin's start on the x axis.
  var bar = svg.selectAll(".bar")
      .data(bins)
      .enter().append("g")
      .attr("class", "bar")
      .attr("transform", function(d) { return "translate(" + x(d.x0) + "," + y(d.length) + ")"; });

  bar.append("rect")
      .attr("x", 1)
      .attr("width", x(bins[0].x1) - x(bins[0].x0) - 1)
      .attr("height", function(d) { return height - y(d.length); });

  // Label each bar with the number of DataNodes it contains.
  bar.append("text")
      .attr("dy", ".75em")
      .attr("y", 6)
      .attr("x", (x(bins[0].x1) - x(bins[0].x0)) / 2)
      .attr("text-anchor", "middle")
      .text(function(d) { return formatCount(d.length); });

  svg.append("g")
      .attr("class", "axis axis--x")
      .attr("transform", "translate(0," + height + ")")
      .call(d3.axisBottom(x));
}
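For context, renderHistogram only assumes that dnData.LiveNodes is an array whose entries expose usedSpace and capacity, which is the shape of the data the loader above already passes via renderHistogram(data). The following is a minimal sketch, not part of the patch, that exercises the function in isolation with made-up numbers; it assumes jQuery, d3 v4, a div.container element, and the #datanode-usage-histogram div are already on the page, and the mockDnData name is purely illustrative.

// Hypothetical mock input: three DataNodes with 10%, 55% and 90% of their
// capacity used (values are bytes; only the usedSpace/capacity ratio matters).
var mockDnData = {
  LiveNodes: [
    { usedSpace: 10 * 1024 * 1024 * 1024, capacity: 100 * 1024 * 1024 * 1024 },
    { usedSpace: 55 * 1024 * 1024 * 1024, capacity: 100 * 1024 * 1024 * 1024 },
    { usedSpace: 90 * 1024 * 1024 * 1024, capacity: 100 * 1024 * 1024 * 1024 }
  ]
};

// Should draw three one-node bars (in the 10%, 55% and 90% bins) into
// #datanode-usage-histogram; all other bins render with zero height.
renderHistogram(mockDnData);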
function load_datanode_volume_failures() {
  var HELPERS = {

File diff suppressed because one or more lines are too long


@@ -212,4 +212,13 @@ header.bs-docs-nav, header.bs-docs-nav .navbar-brand {
#alert-panel {
  margin-top:20px;
  display: none;
}

.bar rect {
  fill: #5FA33F;
}

.bar text {
  fill: #fff;
  font: 10px sans-serif;
}


@@ -252,7 +252,7 @@ public class MiniHadoopClusterManager {
    // HDFS
    noDFS = cli.hasOption("nodfs");
    numDataNodes = intArgument(cli, "datanodes", 1);
    numDataNodes = intArgument(cli, "datanodes", 100);
    nnPort = intArgument(cli, "nnport", 0);
    dfsOpts = cli.hasOption("format") ? StartupOption.FORMAT
        : StartupOption.REGULAR;