HDFS-9425. Expose number of blocks per volume as a metric (Contributed by Brahma Reddy Battula)

(cherry picked from commit 342c9572bf)
This commit is contained in:
Vinayakumar B 2016-02-22 09:29:23 +05:30
parent 3e76768f21
commit 2e3c35a835
5 changed files with 121 additions and 8 deletions
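
With this change, each volume entry in the VolumeInfo attribute of the DataNodeInfo MBean carries a numBlocks field next to usedSpace, freeSpace, reservedSpace and reservedSpaceForReplicas. As a rough sketch (not part of this patch; the class name is just for illustration), the new field can be read in-process the same way the new test below does; a remote monitoring client would instead go through a JMX connector or the DataNode's /jmx HTTP servlet.

import java.lang.management.ManagementFactory;
import java.util.Map;
import javax.management.MBeanServer;
import javax.management.ObjectName;
import org.mortbay.util.ajax.JSON;

public class PrintVolumeBlockCounts {
  public static void main(String[] args) throws Exception {
    // Assumes we run in the same JVM as the DataNode (e.g. inside a
    // MiniDFSCluster), so the DataNodeInfo MBean is registered on the
    // platform MBeanServer.
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName name =
        new ObjectName("Hadoop:service=DataNode,name=DataNodeInfo");
    // VolumeInfo is a JSON string keyed by volume directory, e.g.
    // {"/data/dn1/current": {"usedSpace": ..., "numBlocks": ...}, ...}
    String volumeInfo = (String) mbs.getAttribute(name, "VolumeInfo");
    Map<?, ?> volumes = (Map<?, ?>) JSON.parse(volumeInfo);
    for (Map.Entry<?, ?> e : volumes.entrySet()) {
      Map<?, ?> v = (Map<?, ?>) e.getValue();
      System.out.println(e.getKey() + " numBlocks=" + v.get("numBlocks"));
    }
  }
}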

CHANGES.txt

@@ -1084,6 +1084,9 @@ Release 2.8.0 - UNRELEASED
HDFS-9768. Reuse ObjectMapper instance in HDFS to improve the performance.
(Lin Yiqun via aajisaka)
HDFS-9425. Expose number of blocks per volume as a metric
(Brahma Reddy Battula via vinayakumarb)
BUG FIXES
HDFS-8091: ACLStatus and XAttributes should be presented to

BlockPoolSlice.java

@@ -30,6 +30,7 @@ import java.io.RandomAccessFile;
import java.io.Writer;
import java.util.Iterator;
import java.util.Scanner;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.commons.io.FileUtils;
import org.apache.commons.logging.Log;
@@ -86,6 +87,7 @@ class BlockPoolSlice {
private final boolean deleteDuplicateReplicas;
private static final String REPLICA_CACHE_FILE = "replicas";
private final long replicaCacheExpiry = 5*60*1000;
private AtomicLong numOfBlocks = new AtomicLong();
private final long cachedDfsUsedCheckTime;
private final Timer timer;
@@ -279,7 +281,11 @@ class BlockPoolSlice {
*/
File createTmpFile(Block b) throws IOException {
File f = new File(tmpDir, b.getBlockName());
return DatanodeUtil.createTmpFile(b, f);
File tmpFile = DatanodeUtil.createTmpFile(b, f);
// If an exception occurs during creation, the counter will not have been
// incremented, so there is no need to decrement it on failure.
incrNumBlocks();
return tmpFile;
}
/**
@@ -288,7 +294,11 @@ class BlockPoolSlice {
*/
File createRbwFile(Block b) throws IOException {
File f = new File(rbwDir, b.getBlockName());
return DatanodeUtil.createTmpFile(b, f);
File rbwFile = DatanodeUtil.createTmpFile(b, f);
// If an exception occurs during creation, the counter will not have been
// incremented, so there is no need to decrement it on failure.
incrNumBlocks();
return rbwFile;
}
File addBlock(Block b, File f) throws IOException {
@@ -499,6 +509,9 @@ class BlockPoolSlice {
} else {
lazyWriteReplicaMap.discardReplica(bpid, blockId, false);
}
if (oldReplica == null) {
incrNumBlocks();
}
}
@@ -831,4 +844,16 @@ class BlockPoolSlice {
}
}
}
void incrNumBlocks() {
numOfBlocks.incrementAndGet();
}
void decrNumBlocks() {
numOfBlocks.decrementAndGet();
}
public long getNumOfBlocks() {
return numOfBlocks.get();
}
}

FsDatasetImpl.java

@@ -970,6 +970,11 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
newReplicaInfo.setNumBytes(blockFiles[1].length());
// Finalize the copied files
newReplicaInfo = finalizeReplica(block.getBlockPoolId(), newReplicaInfo);
synchronized (this) {
// Increment numBlocks here because this block was moved without the
// BlockPoolSlice (BPS) being notified.
FsVolumeImpl volume = (FsVolumeImpl) newReplicaInfo.getVolume();
volume.getBlockPoolSlice(block.getBlockPoolId()).incrNumBlocks();
}
removeOldReplica(replicaInfo, newReplicaInfo, oldBlockFile, oldMetaFile,
oldBlockFile.length(), oldMetaFile.length(), block.getBlockPoolId());
@@ -2604,6 +2609,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
final long reservedSpace; // size of space reserved for non-HDFS
final long reservedSpaceForReplicas; // size of space reserved RBW or
// re-replication
final long numBlocks;
VolumeInfo(FsVolumeImpl v, long usedSpace, long freeSpace) {
this.directory = v.toString();
@@ -2611,6 +2617,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
this.freeSpace = freeSpace;
this.reservedSpace = v.getReserved();
this.reservedSpaceForReplicas = v.getReservedForReplicas();
this.numBlocks = v.getNumBlocks();
}
}
@@ -2645,6 +2652,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
innerInfo.put("freeSpace", v.freeSpace);
innerInfo.put("reservedSpace", v.reservedSpace);
innerInfo.put("reservedSpaceForReplicas", v.reservedSpaceForReplicas);
innerInfo.put("numBlocks", v.numBlocks);
info.put(v.directory, innerInfo);
}
return info;
@@ -2775,8 +2783,8 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
synchronized (FsDatasetImpl.this) {
ramDiskReplicaTracker.recordEndLazyPersist(bpId, blockId, savedFiles);
targetVolume.incDfsUsed(bpId,
savedFiles[0].length() + savedFiles[1].length());
targetVolume.incDfsUsedAndNumBlocks(bpId, savedFiles[0].length()
+ savedFiles[1].length());
// Update metrics (ignore the metadata file size)
datanode.getMetrics().incrRamDiskBlocksLazyPersisted();

FsVolumeImpl.java

@@ -284,21 +284,35 @@ public class FsVolumeImpl implements FsVolumeSpi {
}
void onBlockFileDeletion(String bpid, long value) {
decDfsUsed(bpid, value);
decDfsUsedAndNumBlocks(bpid, value, true);
if (isTransientStorage()) {
dataset.releaseLockedMemory(value, true);
}
}
void onMetaFileDeletion(String bpid, long value) {
decDfsUsed(bpid, value);
decDfsUsedAndNumBlocks(bpid, value, false);
}
private void decDfsUsed(String bpid, long value) {
private void decDfsUsedAndNumBlocks(String bpid, long value,
boolean blockFileDeleted) {
synchronized(dataset) {
BlockPoolSlice bp = bpSlices.get(bpid);
if (bp != null) {
bp.decDfsUsed(value);
if (blockFileDeleted) {
bp.decrNumBlocks();
}
}
}
}
void incDfsUsedAndNumBlocks(String bpid, long value) {
synchronized (dataset) {
BlockPoolSlice bp = bpSlices.get(bpid);
if (bp != null) {
bp.incDfsUsed(value);
bp.incrNumBlocks();
}
}
}
@@ -847,7 +861,15 @@ public class FsVolumeImpl implements FsVolumeSpi {
throws IOException {
getBlockPoolSlice(bpid).getVolumeMap(volumeMap, ramDiskReplicaMap);
}
long getNumBlocks() {
long numBlocks = 0;
for (BlockPoolSlice s : bpSlices.values()) {
numBlocks += s.getNumOfBlocks();
}
return numBlocks;
}
@Override
public String toString() {
return currentDir.getAbsolutePath();

TestDataNodeMXBean.java

@@ -18,15 +18,23 @@
package org.apache.hadoop.hdfs.server.datanode;
import java.lang.management.ManagementFactory;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import javax.management.MBeanServer;
import javax.management.ObjectName;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.Assert;
import org.junit.Test;
import org.mortbay.util.ajax.JSON;
import static org.junit.Assert.assertEquals;
/**
* Class for testing {@link DataNodeMXBean} implementation
@@ -78,4 +86,51 @@ public class TestDataNodeMXBean {
private static String replaceDigits(final String s) {
return s.replaceAll("[0-9]+", "_DIGITS_");
}
@Test
public void testDataNodeMXBeanBlockCount() throws Exception {
Configuration conf = new Configuration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
try {
List<DataNode> datanodes = cluster.getDataNodes();
assertEquals(datanodes.size(), 1);
MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
ObjectName mxbeanName =
new ObjectName("Hadoop:service=DataNode,name=DataNodeInfo");
FileSystem fs = cluster.getFileSystem();
for (int i = 0; i < 5; i++) {
DFSTestUtil.createFile(fs, new Path("/tmp.txt" + i), 1024, (short) 1,
1L);
}
assertEquals("Before restart DN", 5, getTotalNumBlocks(mbs, mxbeanName));
cluster.restartDataNode(0);
cluster.waitActive();
assertEquals("After restart DN", 5, getTotalNumBlocks(mbs, mxbeanName));
fs.delete(new Path("/tmp.txt1"), true);
// Wait till replica gets deleted on disk.
Thread.sleep(5000);
assertEquals("After delete one file", 4,
getTotalNumBlocks(mbs, mxbeanName));
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
@SuppressWarnings("unchecked")
int getTotalNumBlocks(MBeanServer mbs, ObjectName mxbeanName)
throws Exception {
int totalBlocks = 0;
String volumeInfo = (String) mbs.getAttribute(mxbeanName, "VolumeInfo");
Map<?, ?> m = (Map<?, ?>) JSON.parse(volumeInfo);
Collection<Map<String, Long>> values =
(Collection<Map<String, Long>>) m.values();
for (Map<String, Long> volumeInfoMap : values) {
totalBlocks += volumeInfoMap.get("numBlocks");
}
return totalBlocks;
}
}
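
To exercise just the new test locally, running the TestDataNodeMXBean suite through the usual Maven surefire invocation from the hadoop-hdfs module of your checkout (for example, mvn test -Dtest=TestDataNodeMXBean) should cover testDataNodeMXBeanBlockCount.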