From 52c534cd0fc118b0406d13ce8806af995c27f74e Mon Sep 17 00:00:00 2001
From: Kihwal Lee
Date: Mon, 3 Oct 2016 11:42:06 -0500
Subject: [PATCH] HDFS-10940. Reduce performance penalty of block caching when
 not used. Contributed by Daryn Sharp.

(cherry picked from commit 03b797a6ac2b1f99c6baebf4729bae22aa267692)
---
 .../server/blockmanagement/BlockManager.java   |  9 ++++++++-
 .../hdfs/server/namenode/CacheManager.java     | 12 +++++++++++-
 .../server/namenode/FSDirStatAndListingOp.java | 18 +-----------------
 .../server/namenode/TestCacheDirectives.java   | 10 ++++++++++
 4 files changed, 30 insertions(+), 19 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index e095e5e6350..3bb7c2af3bd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -95,6 +95,7 @@ import org.apache.hadoop.hdfs.server.protocol.KeyUpdateCommand;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
 import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
 import org.apache.hadoop.hdfs.util.LightWeightHashSet;
+import org.apache.hadoop.hdfs.server.namenode.CacheManager;
 import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -1047,9 +1048,15 @@ public class BlockManager implements BlockStatsMXBean {
             fileSizeExcludeBlocksUnderConstruction, mode);
         isComplete = true;
       }
-      return new LocatedBlocks(
+      LocatedBlocks locations = new LocatedBlocks(
           fileSizeExcludeBlocksUnderConstruction, isFileUnderConstruction,
           locatedblocks, lastlb, isComplete, feInfo);
+      // Set caching information for the located blocks.
+      CacheManager cm = namesystem.getCacheManager();
+      if (cm != null) {
+        cm.setCachedLocations(locations);
+      }
+      return locations;
     }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java
index 2b3f3c97c3e..4283b007d51 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java
@@ -63,6 +63,7 @@ import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto;
 import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
@@ -894,7 +895,16 @@
     return new BatchedListEntries<CachePoolEntry>(results, false);
   }
 
-  public void setCachedLocations(LocatedBlock block) {
+  public void setCachedLocations(LocatedBlocks locations) {
+    // don't attempt lookups if there are no cached blocks
+    if (cachedBlocks.size() > 0) {
+      for (LocatedBlock lb : locations.getLocatedBlocks()) {
+        setCachedLocations(lb);
+      }
+    }
+  }
+
+  private void setCachedLocations(LocatedBlock block) {
     CachedBlock cachedBlock =
         new CachedBlock(block.getBlock().getBlockId(),
             (short)0, false);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
index dfaacc6cd50..6c7a92fccc5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
@@ -34,7 +34,6 @@ import org.apache.hadoop.hdfs.protocol.FsPermissionExtension;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
-import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
@@ -154,7 +153,6 @@
         "Negative offset is not supported. File: " + src);
     Preconditions.checkArgument(length >= 0,
         "Negative length is not supported. File: " + src);
-    CacheManager cm = fsd.getFSNamesystem().getCacheManager();
     BlockManager bm = fsd.getBlockManager();
     fsd.readLock();
     try {
@@ -186,11 +184,6 @@
           inode.getBlocks(iip.getPathSnapshotId()), fileSize, isUc, offset,
           length, needBlockToken, iip.isSnapshot(), feInfo);
 
-      // Set caching information for the located blocks.
-      for (LocatedBlock lb : blocks.getLocatedBlocks()) {
-        cm.setCachedLocations(lb);
-      }
-
       final long now = now();
       boolean updateAccessTime = fsd.isAccessTimeSupported()
           && !iip.isSnapshot()
@@ -454,7 +447,7 @@
         node.asDirectory().getChildrenNum(snapshot) : 0;
 
     INodeAttributes nodeAttrs = fsd.getAttributes(iip);
-    HdfsFileStatus status = createFileStatus(
+    return createFileStatus(
         size,
         node.isDirectory(),
         replication,
@@ -471,15 +464,6 @@
         feInfo,
         storagePolicy,
         loc);
-    // Set caching information for the located blocks.
-    if (loc != null) {
-      CacheManager cacheManager = fsd.getFSNamesystem().getCacheManager();
-      for (LocatedBlock lb: loc.getLocatedBlocks()) {
-        cacheManager.setCachedLocations(lb);
-      }
-    }
-
-    return status;
   }
 
   private static HdfsFileStatus createFileStatus(long length, boolean isdir,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
index 5dd192f8319..a9988b7e120 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
@@ -72,6 +72,7 @@ import org.apache.hadoop.hdfs.protocol.CachePoolStats;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList.Type;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
@@ -89,6 +90,7 @@ import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
+import org.mockito.Mockito;
 
 import com.google.common.base.Supplier;
 
@@ -1473,4 +1475,12 @@ public class TestCacheDirectives {
       DataNodeTestUtils.setCacheReportsDisabledForTests(cluster, false);
     }
   }
+
+  @Test
+  public void testNoLookupsWhenNotUsed() throws Exception {
+    CacheManager cm = cluster.getNamesystem().getCacheManager();
+    LocatedBlocks locations = Mockito.mock(LocatedBlocks.class);
+    cm.setCachedLocations(locations);
+    Mockito.verifyZeroInteractions(locations);
+  }
 }