HDFS-8196. Post enabled Erasure Coding Policies on NameNode UI. Contributed by Kitti Nanasi and Kai Sasaki.

Takanobu Asanuma 2018-09-29 00:53:35 +09:00
parent 8598b498bc
commit e2113500df
5 changed files with 72 additions and 1 deletion

ErasureCodingPolicyManager.java

@@ -31,6 +31,7 @@
 import org.apache.hadoop.io.erasurecode.CodecUtil;
 import org.apache.hadoop.io.erasurecode.ErasureCodeConstants;
+import org.apache.hadoop.util.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

@@ -430,4 +431,9 @@ public synchronized void loadPolicies(
     allPolicies =
         policiesByName.values().toArray(new ErasureCodingPolicyInfo[0]);
   }
+
+  public String getEnabledPoliciesMetric() {
+    return StringUtils.join(", ",
+        enabledPoliciesByName.keySet());
+  }
 }
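For context, the new getEnabledPoliciesMetric() simply joins the names of the enabled policies. A minimal standalone sketch of the output format (the policy names below are illustrative, and java.lang.String.join stands in for Hadoop's StringUtils.join, which behaves the same for this call):

import java.util.Arrays;
import java.util.LinkedHashSet;
import java.util.Set;

public class EnabledPoliciesMetricSketch {
  public static void main(String[] args) {
    // Stand-in for enabledPoliciesByName.keySet(); insertion order preserved.
    Set<String> enabled =
        new LinkedHashSet<>(Arrays.asList("RS-6-3-1024k", "RS-10-4-1024k"));
    // Prints: RS-6-3-1024k, RS-10-4-1024k
    System.out.println(String.join(", ", enabled));
  }
}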

FSNamesystem.java

@@ -4973,6 +4973,15 @@ public long getTotalECBlockGroups() {
     return blockManager.getTotalECBlockGroups();
   }

+  /**
+   * Get the enabled erasure coding policies separated with comma.
+   */
+  @Override // ECBlockGroupsMBean
+  @Metric({"EnabledEcPolicies", "Enabled erasure coding policies"})
+  public String getEnabledEcPolicies() {
+    return getErasureCodingPolicyManager().getEnabledPoliciesMetric();
+  }
+
   @Override
   public long getBlockDeletionStartTime() {
     return startTime + blockManager.getStartupDelayBlockDeletionInMs();
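Because the getter carries Hadoop's @Metric annotation and FSNamesystem is registered as the ECBlockGroupsState bean, the value is also reachable over the NameNode's JMX JSON servlet, not just the in-process MBeanServer used by the test below. A hedged sketch of an external query (localhost and the default Hadoop 3.x HTTP port 9870 are assumptions; the bean name is taken from the test in this patch):

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class QueryEnabledEcPolicies {
  public static void main(String[] args) throws Exception {
    // Adjust host/port to your NameNode's HTTP address.
    String url = "http://localhost:9870/jmx"
        + "?qry=Hadoop:service=NameNode,name=ECBlockGroupsState";
    HttpResponse<String> resp = HttpClient.newHttpClient().send(
        HttpRequest.newBuilder(URI.create(url)).GET().build(),
        HttpResponse.BodyHandlers.ofString());
    // The JSON body includes "EnabledEcPolicies" : "RS-6-3-1024k"
    // alongside the other ECBlockGroupsMBean attributes.
    System.out.println(resp.body());
  }
}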

ECBlockGroupsMBean.java

@@ -61,4 +61,9 @@ public interface ECBlockGroupsMBean {
    * Return total number of erasure coded block groups.
    */
   long getTotalECBlockGroups();
+
+  /**
+   * @return the enabled erasure coding policies separated with comma.
+   */
+  String getEnabledEcPolicies();
 }
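JMX derives attribute names from the interface's getters by stripping the get prefix, which is why getEnabledEcPolicies() surfaces as the EnabledEcPolicies attribute read by the test at the end of this patch. A small sketch that lists every attribute of the bean (it assumes a NameNode running in the same JVM, e.g. inside a MiniDFSCluster-based test):

import java.lang.management.ManagementFactory;
import javax.management.MBeanAttributeInfo;
import javax.management.MBeanServer;
import javax.management.ObjectName;

public class ListEcBlockGroupAttributes {
  public static void main(String[] args) throws Exception {
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName bean =
        new ObjectName("Hadoop:service=NameNode,name=ECBlockGroupsState");
    // Prints each attribute name and type,
    // e.g. "EnabledEcPolicies : java.lang.String".
    for (MBeanAttributeInfo attr : mbs.getMBeanInfo(bean).getAttributes()) {
      System.out.println(attr.getName() + " : " + attr.getType());
    }
  }
}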

dfshealth.html

@@ -186,10 +186,12 @@
 {/eq}
 <tr><th>Block Deletion Start Time</th><td>{BlockDeletionStartTime|date_tostring}</td></tr>
 {/fs}
 {#fsn}
 <tr><th>Last Checkpoint Time</th><td>{@if cond="{LastCheckpointTime} === 0"}Never{:else}{LastCheckpointTime|date_tostring}{/if}</td></tr>
 {/fsn}
+{#ecstat}
+<tr><th>Enabled Erasure Coding Policies</th><td>{EnabledEcPolicies}</td></tr>
+{/ecstat}
 </table>
 <div class="page-header"><h1>NameNode Journal Status</h1></div>

TestNameNodeMetrics.java

@@ -34,6 +34,7 @@
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.StripedFileTestUtil;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;

@@ -728,6 +729,46 @@ private void checkNNDirSize(Collection<URI> nameDirUris, String metric){
     }
   }

+  @Test
+  public void testEnabledEcPoliciesMetric() throws Exception {
+    MiniDFSCluster cluster = null;
+    DistributedFileSystem fs = null;
+    try {
+      Configuration conf = new HdfsConfiguration();
+      ErasureCodingPolicy defaultPolicy =
+          StripedFileTestUtil.getDefaultECPolicy();
+      int dataBlocks = defaultPolicy.getNumDataUnits();
+      int parityBlocks = defaultPolicy.getNumParityUnits();
+      int totalSize = dataBlocks + parityBlocks;
+      cluster = new MiniDFSCluster.Builder(conf)
+          .numDataNodes(totalSize).build();
+      fs = cluster.getFileSystem();
+
+      final String defaultPolicyName = defaultPolicy.getName();
+      final String rs104PolicyName = "RS-10-4-1024k";
+
+      assertEquals("Enabled EC policies metric should return with " +
+          "the default EC policy", defaultPolicyName,
+          getEnabledEcPoliciesMetric());
+
+      fs.enableErasureCodingPolicy(rs104PolicyName);
+      assertEquals("Enabled EC policies metric should return with " +
+          "both enabled policies separated by a comma",
+          rs104PolicyName + ", " + defaultPolicyName,
+          getEnabledEcPoliciesMetric());
+
+      fs.disableErasureCodingPolicy(defaultPolicyName);
+      fs.disableErasureCodingPolicy(rs104PolicyName);
+      assertEquals("Enabled EC policies metric should return with " +
+          "an empty string if there is no enabled policy",
+          "", getEnabledEcPoliciesMetric());
+    } finally {
+      fs.close();
+      cluster.shutdown();
+    }
+  }
+
   @Test
   public void testVerifyMissingBlockGroupsMetrics() throws Exception {
     MiniDFSCluster cluster = null;

@@ -968,4 +1009,12 @@ void verifyTotalBlocksMetrics(long expectedTotalReplicatedBlocks,
     assertEquals("Unexpected total ec block groups!",
         expectedTotalECBlockGroups, totalECBlockGroups.longValue());
   }
+
+  private String getEnabledEcPoliciesMetric() throws Exception {
+    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
+    ObjectName mxbeanName = new ObjectName(
+        "Hadoop:service=NameNode,name=ECBlockGroupsState");
+    return (String) (mbs.getAttribute(mxbeanName,
+        "EnabledEcPolicies"));
+  }
 }