HDFS-15574. Remove unnecessary sort of block list in DirectoryScanner. Contributed by Stephen O'Donnell.

This commit is contained in:
hemanthboyina 2020-09-15 10:10:21 +05:30
parent 924959088e
commit f4ed9f3f91
8 changed files with 49 additions and 12 deletions

View File

@@ -482,8 +482,7 @@ private void scan() {
Collection<ScanInfo> diffRecord = new ArrayList<>(); Collection<ScanInfo> diffRecord = new ArrayList<>();
statsRecord.totalBlocks = blockpoolReport.size(); statsRecord.totalBlocks = blockpoolReport.size();
final List<ReplicaInfo> bl = dataset.getFinalizedBlocks(bpid); final List<ReplicaInfo> bl = dataset.getSortedFinalizedBlocks(bpid);
Collections.sort(bl); // Sort based on blockId
int d = 0; // index for blockpoolReport int d = 0; // index for blockpoolReport
int m = 0; // index for memReport int m = 0; // index for memReport

View File

@@ -237,16 +237,17 @@ StorageReport[] getStorageReports(String bpid)
VolumeFailureSummary getVolumeFailureSummary(); VolumeFailureSummary getVolumeFailureSummary();
/** /**
* Gets a list of references to the finalized blocks for the given block pool. * Gets a sorted list of references to the finalized blocks for the given
* block pool. The list is sorted by blockID.
* <p> * <p>
* Callers of this function should call * Callers of this function should call
* {@link FsDatasetSpi#acquireDatasetLock} to avoid blocks' status being * {@link FsDatasetSpi#acquireDatasetLock} to avoid blocks' status being
* changed during list iteration. * changed during list iteration.
* </p> * </p>
* @return a list of references to the finalized blocks for the given block * @return a list of references to the finalized blocks for the given block
* pool. * pool. The list is sorted by blockID.
*/ */
List<ReplicaInfo> getFinalizedBlocks(String bpid); List<ReplicaInfo> getSortedFinalizedBlocks(String bpid);
/** /**
* Check whether the in-memory block record matches the block on the disk, * Check whether the in-memory block record matches the block on the disk,

View File

@@ -1992,17 +1992,18 @@ public Map<DatanodeStorage, BlockListAsLongs> getBlockReports(String bpid) {
} }
/** /**
* Gets a list of references to the finalized blocks for the given block pool. * Gets a list of references to the finalized blocks for the given block pool,
* sorted by blockID.
* <p> * <p>
* Callers of this function should call * Callers of this function should call
* {@link FsDatasetSpi#acquireDatasetLock()} to avoid blocks' status being * {@link FsDatasetSpi#acquireDatasetLock()} to avoid blocks' status being
* changed during list iteration. * changed during list iteration.
* </p> * </p>
* @return a list of references to the finalized blocks for the given block * @return a list of references to the finalized blocks for the given block
* pool. * pool. The list is sorted by blockID.
*/ */
@Override @Override
public List<ReplicaInfo> getFinalizedBlocks(String bpid) { public List<ReplicaInfo> getSortedFinalizedBlocks(String bpid) {
try (AutoCloseableLock lock = datasetReadLock.acquire()) { try (AutoCloseableLock lock = datasetReadLock.acquire()) {
final List<ReplicaInfo> finalized = new ArrayList<ReplicaInfo>( final List<ReplicaInfo> finalized = new ArrayList<ReplicaInfo>(
volumeMap.size(bpid)); volumeMap.size(bpid));

View File

@@ -173,7 +173,7 @@ private void thistest(Configuration conf, DFSTestUtil util) throws Exception {
final DataNode dn = cluster.getDataNodes().get(dnIdx); final DataNode dn = cluster.getDataNodes().get(dnIdx);
final String bpid = cluster.getNamesystem().getBlockPoolId(); final String bpid = cluster.getNamesystem().getBlockPoolId();
List<ReplicaInfo> replicas = List<ReplicaInfo> replicas =
dn.getFSDataset().getFinalizedBlocks(bpid); dn.getFSDataset().getSortedFinalizedBlocks(bpid);
assertTrue("Replicas do not exist", !replicas.isEmpty()); assertTrue("Replicas do not exist", !replicas.isEmpty());
for (int idx = 0; idx < replicas.size(); idx++) { for (int idx = 0; idx < replicas.size(); idx++) {

View File

@@ -540,7 +540,7 @@ private void testErasureCodingWorkerXmitsWeight(
writeFile(fs, "/ec-xmits-weight", fileLen); writeFile(fs, "/ec-xmits-weight", fileLen);
DataNode dn = cluster.getDataNodes().get(0); DataNode dn = cluster.getDataNodes().get(0);
int corruptBlocks = dn.getFSDataset().getFinalizedBlocks( int corruptBlocks = dn.getFSDataset().getSortedFinalizedBlocks(
cluster.getNameNode().getNamesystem().getBlockPoolId()).size(); cluster.getNameNode().getNamesystem().getBlockPoolId()).size();
int expectedXmits = corruptBlocks * expectedWeight; int expectedXmits = corruptBlocks * expectedWeight;

View File

@@ -1510,7 +1510,7 @@ public StorageReport[] getStorageReports(String bpid) {
} }
@Override @Override
public List<ReplicaInfo> getFinalizedBlocks(String bpid) { public List<ReplicaInfo> getSortedFinalizedBlocks(String bpid) {
throw new UnsupportedOperationException(); throw new UnsupportedOperationException();
} }

View File

@@ -90,7 +90,7 @@ public Map<String, Object> getVolumeInfoMap() {
} }
@Override @Override
public List<ReplicaInfo> getFinalizedBlocks(String bpid) { public List<ReplicaInfo> getSortedFinalizedBlocks(String bpid) {
return null; return null;
} }

View File

@@ -80,6 +80,7 @@
import java.io.Writer; import java.io.Writer;
import java.nio.charset.StandardCharsets; import java.nio.charset.StandardCharsets;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Random;
import java.util.concurrent.CountDownLatch; import java.util.concurrent.CountDownLatch;
import java.util.HashSet; import java.util.HashSet;
import java.util.List; import java.util.List;
@@ -570,6 +571,41 @@ public void testAddVolumeFailureReleasesInUseLock() throws IOException {
FsDatasetTestUtil.assertFileLockReleased(badDir.toString()); FsDatasetTestUtil.assertFileLockReleased(badDir.toString());
} }
/**
 * This test is here primarily to catch any case where the datanode replica
 * map structure is changed to a new structure which is not sorted and hence
 * reading the blocks from it directly would not be sorted.
 */
@Test
public void testSortedFinalizedBlocksAreSorted() throws IOException {
  this.conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    cluster.waitActive();
    DataNode dn = cluster.getDataNodes().get(0);
    FsDatasetSpi<?> ds = DataNodeTestUtils.getFSDataset(dn);
    ds.addBlockPool(BLOCKPOOL, conf);

    // Load 1001 finalized replicas with random blockIDs. A single Random
    // instance is reused across iterations rather than re-created each time.
    // The genstamp (1000 + i) is unique per replica so collisions on the
    // random blockID still produce distinct blocks.
    Random rand = new Random();
    for (int i = 0; i <= 1000; i++) {
      ExtendedBlock eb = new ExtendedBlock(
          BLOCKPOOL, rand.nextInt(), 1000, 1000 + i);
      cluster.getFsDatasetTestUtils(0).createFinalizedReplica(eb);
    }

    // Retrieve the block list and validate it is sorted by blockID, i.e.
    // each element compares <= its successor.
    List<ReplicaInfo> replicaList = ds.getSortedFinalizedBlocks(BLOCKPOOL);
    for (int i = 0; i < replicaList.size() - 1; i++) {
      if (replicaList.get(i).compareTo(replicaList.get(i + 1)) > 0) {
        // Not sorted so fail the test
        fail("ArrayList is not sorted, and it should be");
      }
    }
  } finally {
    cluster.shutdown();
  }
}
@Test @Test
public void testDeletingBlocks() throws IOException { public void testDeletingBlocks() throws IOException {
HdfsConfiguration conf = new HdfsConfiguration(); HdfsConfiguration conf = new HdfsConfiguration();