HDFS-10940. Reduce performance penalty of block caching when not used. Contributed by Daryn Sharp.

(cherry picked from commit 03b797a6ac)
Kihwal Lee 2016-10-03 11:42:06 -05:00
parent 84b20d1c18
commit 52c534cd0f
4 changed files with 30 additions and 19 deletions
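In brief: getBlockLocations and getListing previously called CacheManager.setCachedLocations(LocatedBlock) once per located block, paying a lookup against the cached-blocks map even on clusters that never use cache directives. This commit moves the loop into a new CacheManager.setCachedLocations(LocatedBlocks) that returns immediately when nothing is cached, and has BlockManager invoke it once per request. A minimal sketch of that short-circuit pattern, using a made-up SimpleCacheManager (cachedBlockIds, setCachedLocation) in place of the real HDFS classes:

    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;

    // Hypothetical stand-in for CacheManager; illustrates only the
    // guard pattern applied by this commit, not the real HDFS API.
    class SimpleCacheManager {
      private final Set<Long> cachedBlockIds = new HashSet<>();

      // Called once per request by the caller, not once per block.
      public void setCachedLocations(List<Long> locatedBlockIds) {
        // Skip every per-block lookup when caching is unused.
        if (cachedBlockIds.isEmpty()) {
          return;
        }
        for (long blockId : locatedBlockIds) {
          setCachedLocation(blockId);
        }
      }

      private void setCachedLocation(long blockId) {
        if (cachedBlockIds.contains(blockId)) {
          // ... attach cached-replica information to the block ...
        }
      }
    }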

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

@@ -95,6 +95,7 @@ import org.apache.hadoop.hdfs.server.protocol.KeyUpdateCommand;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
 import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
 import org.apache.hadoop.hdfs.util.LightWeightHashSet;
+import org.apache.hadoop.hdfs.server.namenode.CacheManager;
 import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -1047,9 +1048,15 @@ public class BlockManager implements BlockStatsMXBean {
           fileSizeExcludeBlocksUnderConstruction, mode);
       isComplete = true;
     }
 
-    return new LocatedBlocks(
+    LocatedBlocks locations = new LocatedBlocks(
         fileSizeExcludeBlocksUnderConstruction, isFileUnderConstruction,
         locatedblocks, lastlb, isComplete, feInfo);
+    // Set caching information for the located blocks.
+    CacheManager cm = namesystem.getCacheManager();
+    if (cm != null) {
+      cm.setCachedLocations(locations);
+    }
+    return locations;
   }
 }

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java

@@ -63,6 +63,7 @@ import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto;
 import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
@@ -894,7 +895,16 @@ public final class CacheManager {
     return new BatchedListEntries<CachePoolEntry>(results, false);
   }
 
-  public void setCachedLocations(LocatedBlock block) {
+  public void setCachedLocations(LocatedBlocks locations) {
+    // don't attempt lookups if there are no cached blocks
+    if (cachedBlocks.size() > 0) {
+      for (LocatedBlock lb : locations.getLocatedBlocks()) {
+        setCachedLocations(lb);
+      }
+    }
+  }
+
+  private void setCachedLocations(LocatedBlock block) {
     CachedBlock cachedBlock =
         new CachedBlock(block.getBlock().getBlockId(),
             (short)0, false);

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java

@@ -34,7 +34,6 @@ import org.apache.hadoop.hdfs.protocol.FsPermissionExtension;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
-import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
@@ -154,7 +153,6 @@ class FSDirStatAndListingOp {
         "Negative offset is not supported. File: " + src);
     Preconditions.checkArgument(length >= 0,
         "Negative length is not supported. File: " + src);
-    CacheManager cm = fsd.getFSNamesystem().getCacheManager();
     BlockManager bm = fsd.getBlockManager();
     fsd.readLock();
     try {
@@ -186,11 +184,6 @@ class FSDirStatAndListingOp {
           inode.getBlocks(iip.getPathSnapshotId()), fileSize, isUc, offset,
           length, needBlockToken, iip.isSnapshot(), feInfo);
-      // Set caching information for the located blocks.
-      for (LocatedBlock lb : blocks.getLocatedBlocks()) {
-        cm.setCachedLocations(lb);
-      }
-
       final long now = now();
       boolean updateAccessTime = fsd.isAccessTimeSupported()
           && !iip.isSnapshot()
@@ -454,7 +447,7 @@ class FSDirStatAndListingOp {
         node.asDirectory().getChildrenNum(snapshot) : 0;
     INodeAttributes nodeAttrs = fsd.getAttributes(iip);
-    HdfsFileStatus status = createFileStatus(
+    return createFileStatus(
         size,
         node.isDirectory(),
         replication,
@@ -471,15 +464,6 @@
         feInfo,
         storagePolicy,
         loc);
-    // Set caching information for the located blocks.
-    if (loc != null) {
-      CacheManager cacheManager = fsd.getFSNamesystem().getCacheManager();
-      for (LocatedBlock lb: loc.getLocatedBlocks()) {
-        cacheManager.setCachedLocations(lb);
-      }
-    }
-
-    return status;
   }
 
   private static HdfsFileStatus createFileStatus(long length, boolean isdir,
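Net effect in this file: both call sites that previously looped over the located blocks are deleted (along with the now-unused LocatedBlock import), and the lookups are consolidated into the single CacheManager call made from BlockManager above.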

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java

@@ -72,6 +72,7 @@ import org.apache.hadoop.hdfs.protocol.CachePoolStats;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList.Type;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
@@ -89,6 +90,7 @@ import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
+import org.mockito.Mockito;
 
 import com.google.common.base.Supplier;
@@ -1473,4 +1475,12 @@ public class TestCacheDirectives {
       DataNodeTestUtils.setCacheReportsDisabledForTests(cluster, false);
     }
   }
+
+  @Test
+  public void testNoLookupsWhenNotUsed() throws Exception {
+    CacheManager cm = cluster.getNamesystem().getCacheManager();
+    LocatedBlocks locations = Mockito.mock(LocatedBlocks.class);
+    cm.setCachedLocations(locations);
+    Mockito.verifyZeroInteractions(locations);
+  }
 }
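The new test leans directly on the short circuit: a fresh test cluster has no cache directives, so cachedBlocks is empty and setCachedLocations(LocatedBlocks) must return without touching its argument. Mockito.verifyZeroInteractions(locations) fails if even getLocatedBlocks() is invoked, pinning down that the unused-cache path performs no per-block work.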