HDFS-16593. Correct the BlocksRemoved metric on DataNode side (#4353). Contributed by ZanderXu.

Signed-off-by: He Xiaoqiao <hexiaoqiao@apache.org>
ZanderXu 2022-09-05 19:35:48 +08:00 committed by GitHub
parent 7bf95d7949
commit ac42519ade
3 changed files with 4 additions and 1 deletion

@@ -741,7 +741,6 @@ private boolean processCommandFromActive(DatanodeCommand cmd,
         // Exceptions caught here are not expected to be disk-related.
         throw e;
       }
-      dn.metrics.incrBlocksRemoved(toDelete.length);
       break;
     case DatanodeProtocol.DNA_CACHE:
       LOG.info("DatanodeCommand action: DNA_CACHE for " +

@@ -2350,6 +2350,7 @@ private void invalidate(String bpid, Block[] invalidBlks, boolean async)
         removing = volumeMap.remove(bpid, invalidBlks[i]);
         addDeletingBlock(bpid, removing.getBlockId());
         LOG.debug("Block file {} is to be deleted", removing.getBlockURI());
+        datanode.getMetrics().incrBlocksRemoved(1);
         if (removing instanceof ReplicaInPipeline) {
           ((ReplicaInPipeline) removing).releaseAllBytesReserved();
         }
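
With the added line, the counter moves next to the actual removal: it is incremented once for each replica taken out of the volume map and queued for deletion, so the metric reflects blocks the dataset really removes rather than the size of the incoming command. A minimal sketch of the counting pattern, assuming a simplified loop body with hypothetical locals (the real FsDatasetImpl.invalidate carries more error handling):

  for (int i = 0; i < invalidBlks.length; i++) {
    ReplicaInfo removing = volumeMap.remove(bpid, invalidBlks[i]);
    if (removing == null) {
      continue;  // nothing to remove for this block; it is not counted
    }
    addDeletingBlock(bpid, removing.getBlockId());
    // Count exactly the blocks that are actually being removed.
    datanode.getMetrics().incrBlocksRemoved(1);
  }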

@@ -70,6 +70,7 @@
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
+import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.io.MultipleIOException;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -226,6 +227,8 @@ public void setUp() throws IOException {
     final ShortCircuitRegistry shortCircuitRegistry =
         new ShortCircuitRegistry(conf);
     when(datanode.getShortCircuitRegistry()).thenReturn(shortCircuitRegistry);
+    DataNodeMetrics dataNodeMetrics = DataNodeMetrics.create(conf, "mockName");
+    when(datanode.getMetrics()).thenReturn(dataNodeMetrics);
     createStorageDirs(storage, conf, NUM_INIT_VOLUMES);
     dataset = new FsDatasetImpl(datanode, storage, conf);
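
The two added lines give the mocked DataNode a real DataNodeMetrics instance, so code under test that now calls datanode.getMetrics().incrBlocksRemoved(1) no longer gets the Mockito mock's default null return. A rough example of how an assertion against that counter might look, using Hadoop's MetricsAsserts test helper; the metric name "BlocksRemoved", the bpid and blocksToInvalidate variables, and this exact flow are illustrative assumptions, not lines from this diff:

  import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
  import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
  import static org.junit.Assert.assertEquals;

  // Snapshot the counter, invalidate a batch, and verify that only the
  // blocks actually removed were counted.
  long before = getLongCounter("BlocksRemoved", getMetrics(dataNodeMetrics.name()));
  dataset.invalidate(bpid, blocksToInvalidate);
  long after = getLongCounter("BlocksRemoved", getMetrics(dataNodeMetrics.name()));
  assertEquals(blocksToInvalidate.length, after - before);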